OSDN Git Service

feat: init cross_tx keepers (#146) fed_doc
authorHAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Mon, 10 Jun 2019 06:56:23 +0000 (14:56 +0800)
committerPaladz <yzhu101@uottawa.ca>
Mon, 10 Jun 2019 06:56:23 +0000 (14:56 +0800)
* add config

* add config

* fix typos

109 files changed:
cmd/fedd/main.go
docs/federation/README.md
federation/config/config.go [new file with mode: 0644]
federation/database/db.go [new file with mode: 0644]
federation/service/node.go [new file with mode: 0644]
federation/synchron/mainchain_keeper.go [new file with mode: 0644]
federation/synchron/sidechain_keeper.go [new file with mode: 0644]
federation/util/http_util.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/.travis.yml [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh [new file with mode: 0755]
vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh [new file with mode: 0755]
vendor/github.com/go-sql-driver/mysql/AUTHORS [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/CHANGELOG.md [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/LICENSE [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/README.md [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/appengine.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/auth.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/auth_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/benchmark_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/buffer.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/collations.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/connection.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/connection_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/const.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/driver.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/driver_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/dsn.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/dsn_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/errors.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/errors_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/fields.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/infile.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/packets.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/packets_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/result.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/rows.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/statement.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/statement_test.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/transaction.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/utils.go [new file with mode: 0644]
vendor/github.com/go-sql-driver/mysql/utils_test.go [new file with mode: 0644]
vendor/github.com/jinzhu/gorm/.codeclimate.yml [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/.github/ISSUE_TEMPLATE.md [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/.github/PULL_REQUEST_TEMPLATE.md [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/.gitignore [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/License [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/README.md [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/association.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/association_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_create.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_delete.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_query.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_query_preload.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_row_query.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_save.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_system_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callback_update.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/callbacks_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/create_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/customize_column_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/delete_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialect.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialect_common.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialect_mysql.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialect_postgres.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialect_sqlite3.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/docker-compose.yml [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/embedded_struct_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/errors.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/errors_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/field.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/field_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/interface.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/join_table_handler.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/join_table_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/logger.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/main.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/main_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/migration_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/model.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/model_struct.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/naming.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/naming_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/pointer_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/polymorphic_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/preload_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/query_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/scaner_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/scope.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/scope_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/search.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/search_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/test_all.sh [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/update_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/gorm/utils.go [new file with mode: 0644]
vendor/github.com/jinzhu/gorm/wercker.yml [new file with mode: 0755]
vendor/github.com/jinzhu/inflection/LICENSE [new file with mode: 0755]
vendor/github.com/jinzhu/inflection/README.md [new file with mode: 0755]
vendor/github.com/jinzhu/inflection/inflections.go [new file with mode: 0755]
vendor/github.com/jinzhu/inflection/inflections_test.go [new file with mode: 0755]
vendor/github.com/jinzhu/inflection/wercker.yml [new file with mode: 0755]

index da29a2c..2ba8ec6 100644 (file)
@@ -1,4 +1,27 @@
 package main
 
+import (
+       "sync"
+
+       log "github.com/sirupsen/logrus"
+
+       "github.com/vapor/federation/config"
+       "github.com/vapor/federation/database"
+       "github.com/vapor/federation/synchron"
+)
+
 func main() {
+       cfg := config.NewConfig()
+       db, err := database.NewMySQLDB(cfg.MySQLConfig)
+       if err != nil {
+               log.WithField("err", err).Panic("initialize mysql db error")
+       }
+
+       go synchron.NewMainchainKeeper(db, &cfg.Mainchain).Run()
+       go synchron.NewSidechainKeeper(db, &cfg.Sidechain).Run()
+
+       // keep the main func running in case of terminating goroutines
+       var wg sync.WaitGroup
+       wg.Add(1)
+       wg.Wait()
 }
index d606fc0..c6e05b9 100644 (file)
@@ -5,4 +5,57 @@ To run a federation node, you will need to:
 1. init a MySQL database with this [schema](./federation.sql);
 2. run a `bytomd` node;
 3. run a `vapord` node and import the federation private key;
-4. and last but not least, run a `fedd` node.
\ No newline at end of file
+4. and last but not least, run a `fedd` node with a `fed_cfg.json`.
+
+A `fed_cfg.json` would look like this:
+
+```json
+{
+    "gin-gonic" : {
+        "listening_port" : 3000,
+        "is_release_mode": false
+    },
+    "mysql" : {
+        "connection" : {
+            "host": "127.0.0.1",
+            "port": 3306,
+            "username": "root",
+            "password": "",
+            "database": "federation"
+        },
+        "log_mode" : true
+    },
+    "warders" : [
+        {
+            "position" : 1,
+            "xpub" : "7f23aae65ee4307c38d342699e328f21834488e18191ebd66823d220b5a58303496c9d09731784372bade78d5e9a4a6249b2cfe2e3a85464e5a4017aa5611e47",
+            "host_port" : "192.168.0.2:3000",
+            "is_local" : false
+        },
+        {
+            "position" : 2,
+            "xpub" : "585e20143db413e45fbc82f03cb61f177e9916ef1df0012daa8cbf6dbb1025ce8f98e51ae319327b63505b64fdbbf6d36ef916d79e6dd67d51b0bfe76fe544c5",
+            "host_port" : "127.0.0.1:3000",
+            "is_local" : true
+        },
+        {
+            "position" : 3,
+            "xpub" : "b58170b51ca61604028ba1cb412377dfc2bc6567c0afc84c83aae1c0c297d0227ccf568561df70851f4144bbf069b525129f2434133c145e35949375b22a6c9d",
+            "host_port" : "192.168.0.3:3000",
+            "is_local" : false
+        }
+    ],
+    "mainchain" : {
+        "name" : "bytom",
+        "confirmations" : 10,
+        "upstream" : "http://127.0.0.1:9888",
+        "sync_seconds" : 150
+    },
+    "sidechain" : {
+        "name" : "vapor",
+        "confirmations" : 100,
+        "upstream" : "http://127.0.0.1:9889",
+        "sync_seconds" : 5
+    }
+}
+```
diff --git a/federation/config/config.go b/federation/config/config.go
new file mode 100644 (file)
index 0000000..270f9b8
--- /dev/null
@@ -0,0 +1,73 @@
+package config
+
+import (
+       "encoding/json"
+       "os"
+
+       log "github.com/sirupsen/logrus"
+
+       "github.com/vapor/crypto/ed25519/chainkd"
+)
+
+func NewConfig() *Config {
+       if len(os.Args) <= 1 {
+               log.Fatal("Please setup the config file path")
+       }
+
+       return NewConfigWithPath(os.Args[1])
+}
+
+func NewConfigWithPath(path string) *Config {
+       configFile, err := os.Open(path)
+       if err != nil {
+       log.WithFields(log.Fields{"err": err, "file_path": path}).Fatal("fail to open config file")
+       }
+       defer configFile.Close()
+
+       cfg := &Config{}
+       if err := json.NewDecoder(configFile).Decode(cfg); err != nil {
+               log.WithField("err", err).Fatal("fail to decode config file")
+       }
+
+       return cfg
+}
+
+type Config struct {
+       GinGonic    GinGonic    `json:"gin-gonic"`
+       MySQLConfig MySQLConfig `json:"mysql"`
+       Warders     []Warder    `json:"warders"`
+       Mainchain   Chain       `json:"mainchain"`
+       Sidechain   Chain       `json:"sidechain"`
+}
+
+type GinGonic struct {
+       ListeningPort uint64 `json:"listening_port"`
+       IsReleaseMode bool   `json:"is_release_mode"`
+}
+
+type MySQLConfig struct {
+       Connection MySQLConnection `json:"connection"`
+       LogMode    bool            `json:"log_mode"`
+}
+
+type MySQLConnection struct {
+       Host     string `json:"host"`
+       Port     uint   `json:"port"`
+       Username string `json:"username"`
+       Password string `json:"password"`
+       DbName   string `json:"database"`
+}
+
+type Warder struct {
+       Position uint8        `json:"position"`
+       XPub     chainkd.XPub `json:"xpub"`
+       HostPort string       `json:"host_port"`
+       IsLocal  bool         `json:"is_local"`
+}
+
+type Chain struct {
+       Name          string `json:"name"`
+       Upstream      string `json:"upstream"`
+       SyncSeconds   uint64 `json:"sync_seconds"`
+       Confirmations uint64 `json:"confirmations"`
+}
diff --git a/federation/database/db.go b/federation/database/db.go
new file mode 100644 (file)
index 0000000..2005b37
--- /dev/null
@@ -0,0 +1,27 @@
+package database
+
+import (
+       "fmt"
+
+       _ "github.com/go-sql-driver/mysql"
+       "github.com/jinzhu/gorm"
+
+       "github.com/vapor/errors"
+       "github.com/vapor/federation/config"
+)
+
+func NewMySQLDB(cfg config.MySQLConfig) (*gorm.DB, error) {
+       dsnTemplate := "%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=true&loc=Local"
+       dsn := fmt.Sprintf(dsnTemplate, cfg.Connection.Username, cfg.Connection.Password, cfg.Connection.Host, cfg.Connection.Port, cfg.Connection.DbName)
+       db, err := gorm.Open("mysql", dsn)
+       if err != nil {
+               return nil, errors.Wrap(err, "open db cluster")
+       }
+
+       db.LogMode(cfg.LogMode)
+       if err = db.DB().Ping(); err != nil {
+               return nil, errors.Wrap(err, "ping db")
+       }
+
+       return db, nil
+}
diff --git a/federation/service/node.go b/federation/service/node.go
new file mode 100644 (file)
index 0000000..6ee376c
--- /dev/null
@@ -0,0 +1,83 @@
+package service
+
+import (
+       "encoding/json"
+
+       "github.com/vapor/errors"
+       "github.com/vapor/federation/util"
+       "github.com/vapor/protocol/bc"
+       "github.com/vapor/protocol/bc/types"
+)
+
+// Node can invoke the api provided by the full node server
+type Node struct {
+       ip string
+}
+
+// NewNode creates an api client with the target server
+func NewNode(ip string) *Node {
+       return &Node{ip: ip}
+}
+
+func (n *Node) GetBlockByHash(hash string) (string, *bc.TransactionStatus, error) {
+       return n.getRawBlock(&getRawBlockReq{BlockHash: hash})
+}
+
+func (n *Node) GetBlockByHeight(height uint64) (string, *bc.TransactionStatus, error) {
+       return n.getRawBlock(&getRawBlockReq{BlockHeight: height})
+}
+
+type getBlockCountResp struct {
+       BlockCount uint64 `json:"block_count"`
+}
+
+func (n *Node) GetBlockCount() (uint64, error) {
+       res := &getBlockCountResp{}
+       err := n.request("/get-block-count", nil, res)
+       return res.BlockCount, err
+}
+
+type getRawBlockReq struct {
+       BlockHeight uint64 `json:"block_height"`
+       BlockHash   string `json:"block_hash"`
+}
+
+type getRawBlockResp struct {
+       RawBlock string `json:"raw_block"`
+       // TransactionStatus has same marshalling rule for both bytom and vapor
+       TransactionStatus *bc.TransactionStatus `json:"transaction_status"`
+}
+
+func (n *Node) getRawBlock(req *getRawBlockReq) (string, *bc.TransactionStatus, error) {
+       payload, err := json.Marshal(req)
+       if err != nil {
+               return "", nil, errors.Wrap(err, "json marshal")
+       }
+
+       res := &getRawBlockResp{}
+       err = n.request("/get-raw-block", payload, res)
+       return res.RawBlock, res.TransactionStatus, err
+}
+
+type submitTxReq struct {
+       Tx *types.Tx `json:"raw_transaction"`
+}
+
+type response struct {
+       Status    string          `json:"status"`
+       Data      json.RawMessage `json:"data"`
+       ErrDetail string          `json:"error_detail"`
+}
+
+func (n *Node) request(url string, payload []byte, respData interface{}) error {
+       resp := &response{}
+       if err := util.Post(n.ip+url, payload, resp); err != nil {
+               return err
+       }
+
+       if resp.Status != "success" {
+               return errors.New(resp.ErrDetail)
+       }
+
+       return json.Unmarshal(resp.Data, respData)
+}
diff --git a/federation/synchron/mainchain_keeper.go b/federation/synchron/mainchain_keeper.go
new file mode 100644 (file)
index 0000000..4e30d0b
--- /dev/null
@@ -0,0 +1,26 @@
+package synchron
+
+import (
+       "github.com/jinzhu/gorm"
+
+       "github.com/vapor/federation/config"
+       "github.com/vapor/federation/service"
+)
+
+type mainchainKeeper struct {
+       cfg       *config.Chain
+       db        *gorm.DB
+       node      *service.Node
+       chainName string
+}
+
+func NewMainchainKeeper(db *gorm.DB, chainCfg *config.Chain) *mainchainKeeper {
+       return &mainchainKeeper{
+               cfg:       chainCfg,
+               db:        db,
+               node:      service.NewNode(chainCfg.Upstream),
+               chainName: chainCfg.Name,
+       }
+}
+
+func (m *mainchainKeeper) Run() {}
diff --git a/federation/synchron/sidechain_keeper.go b/federation/synchron/sidechain_keeper.go
new file mode 100644 (file)
index 0000000..10c27d0
--- /dev/null
@@ -0,0 +1,26 @@
+package synchron
+
+import (
+       "github.com/jinzhu/gorm"
+
+       "github.com/vapor/federation/config"
+       "github.com/vapor/federation/service"
+)
+
+type sidechainKeeper struct {
+       cfg       *config.Chain
+       db        *gorm.DB
+       node      *service.Node
+       chainName string
+}
+
+func NewSidechainKeeper(db *gorm.DB, chainCfg *config.Chain) *sidechainKeeper {
+       return &sidechainKeeper{
+               cfg:       chainCfg,
+               db:        db,
+               node:      service.NewNode(chainCfg.Upstream),
+               chainName: chainCfg.Name,
+       }
+}
+
+func (s *sidechainKeeper) Run() {}
diff --git a/federation/util/http_util.go b/federation/util/http_util.go
new file mode 100644 (file)
index 0000000..b6a1cb3
--- /dev/null
@@ -0,0 +1,50 @@
+package util
+
+import (
+       "bytes"
+       "encoding/json"
+       "io/ioutil"
+       "net/http"
+)
+
+func Get(url string, result interface{}) error {
+       client := &http.Client{}
+       resp, err := client.Get(url)
+       if err != nil {
+               return err
+       }
+
+       defer resp.Body.Close()
+       body, err := ioutil.ReadAll(resp.Body)
+       if err != nil {
+               return err
+       }
+
+       return json.Unmarshal(body, result)
+}
+
+func Post(url string, payload []byte, result interface{}) error {
+       req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload))
+       if err != nil {
+               return err
+       }
+
+       req.Header.Set("Content-Type", "application/json")
+       client := &http.Client{}
+       resp, err := client.Do(req)
+       if err != nil {
+               return err
+       }
+
+       defer resp.Body.Close()
+       if result == nil {
+               return nil
+       }
+
+       body, err := ioutil.ReadAll(resp.Body)
+       if err != nil {
+               return err
+       }
+
+       return json.Unmarshal(body, result)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml
new file mode 100644 (file)
index 0000000..47dd289
--- /dev/null
@@ -0,0 +1,106 @@
+sudo: false
+language: go
+go:
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - master
+
+before_install:
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/mattn/goveralls
+
+before_script:
+  - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
+  - sudo service mysql restart
+  - .travis/wait_mysql.sh
+  - mysql -e 'create database gotest;'
+
+matrix:
+  include:
+    - env: DB=MYSQL8
+      sudo: required
+      dist: trusty
+      go: 1.10.x
+      services:
+        - docker
+      before_install:
+        - go get golang.org/x/tools/cmd/cover
+        - go get github.com/mattn/goveralls
+        - docker pull mysql:8.0
+        - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+          mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+        - cp .travis/docker.cnf ~/.my.cnf
+        - .travis/wait_mysql.sh
+      before_script:
+        - export MYSQL_TEST_USER=gotest
+        - export MYSQL_TEST_PASS=secret
+        - export MYSQL_TEST_ADDR=127.0.0.1:3307
+        - export MYSQL_TEST_CONCURRENT=1
+
+    - env: DB=MYSQL57
+      sudo: required
+      dist: trusty
+      go: 1.10.x
+      services:
+        - docker
+      before_install:
+        - go get golang.org/x/tools/cmd/cover
+        - go get github.com/mattn/goveralls
+        - docker pull mysql:5.7
+        - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+          mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+        - cp .travis/docker.cnf ~/.my.cnf
+        - .travis/wait_mysql.sh
+      before_script:
+        - export MYSQL_TEST_USER=gotest
+        - export MYSQL_TEST_PASS=secret
+        - export MYSQL_TEST_ADDR=127.0.0.1:3307
+        - export MYSQL_TEST_CONCURRENT=1
+
+    - env: DB=MARIA55
+      sudo: required
+      dist: trusty
+      go: 1.10.x
+      services:
+        - docker
+      before_install:
+        - go get golang.org/x/tools/cmd/cover
+        - go get github.com/mattn/goveralls
+        - docker pull mariadb:5.5
+        - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+          mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+        - cp .travis/docker.cnf ~/.my.cnf
+        - .travis/wait_mysql.sh
+      before_script:
+        - export MYSQL_TEST_USER=gotest
+        - export MYSQL_TEST_PASS=secret
+        - export MYSQL_TEST_ADDR=127.0.0.1:3307
+        - export MYSQL_TEST_CONCURRENT=1
+
+    - env: DB=MARIA10_1
+      sudo: required
+      dist: trusty
+      go: 1.10.x
+      services:
+        - docker
+      before_install:
+        - go get golang.org/x/tools/cmd/cover
+        - go get github.com/mattn/goveralls
+        - docker pull mariadb:10.1
+        - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+          mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+        - cp .travis/docker.cnf ~/.my.cnf
+        - .travis/wait_mysql.sh
+      before_script:
+        - export MYSQL_TEST_USER=gotest
+        - export MYSQL_TEST_PASS=secret
+        - export MYSQL_TEST_ADDR=127.0.0.1:3307
+        - export MYSQL_TEST_CONCURRENT=1
+
+script:
+  - go test -v -covermode=count -coverprofile=coverage.out
+  - go vet ./...
+  - .travis/gofmt.sh
+after_script:
+  - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf b/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
new file mode 100644 (file)
index 0000000..e57754e
--- /dev/null
@@ -0,0 +1,5 @@
+[client]
+user = gotest
+password = secret
+host = 127.0.0.1
+port = 3307
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh b/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
new file mode 100755 (executable)
index 0000000..9bf0d16
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -ev
+
+# Only check for go1.10+ since the gofmt style changed
+if [[ $(go version) =~ go1\.([0-9]+) ]] && ((${BASH_REMATCH[1]} >= 10)); then
+    test -z "$(gofmt -d -s . | tee /dev/stderr)"
+fi
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh b/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
new file mode 100755 (executable)
index 0000000..e87993e
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+while :
+do
+    if mysql -e 'select version()' 2>&1 | grep 'version()\|ERROR 2059 (HY000):'; then
+        break
+    fi
+    sleep 3
+done
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644 (file)
index 0000000..fbe4ec4
--- /dev/null
@@ -0,0 +1,90 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+#      Name <email address>
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins <go-sql-driver at die.net>
+Achille Roussel <achille.roussel at gmail.com>
+Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
+Andrew Reid <andrew.reid at tixtrack.com>
+Arne Hormann <arnehormann at gmail.com>
+Asta Xie <xiemengjun at gmail.com>
+Bulat Gaifullin <gaifullinbf at gmail.com>
+Carlos Nieto <jose.carlos at menteslibres.net>
+Chris Moos <chris at tech9computers.com>
+Craig Wilson <craiggwilson at gmail.com>
+Daniel Montoya <dsmontoyam at gmail.com>
+Daniel Nichter <nil at codenode.com>
+Daniël van Eeden <git at myname.nl>
+Dave Protasowski <dprotaso at gmail.com>
+DisposaBoy <disposaboy at dby.me>
+Egor Smolyakov <egorsmkv at gmail.com>
+Evan Shaw <evan at vendhq.com>
+Frederick Mayle <frederickmayle at gmail.com>
+Gustavo Kristic <gkristic at gmail.com>
+Hajime Nakagami <nakagami at gmail.com>
+Hanno Braun <mail at hannobraun.com>
+Henri Yandell <flamefew at gmail.com>
+Hirotaka Yamamoto <ymmt2005 at gmail.com>
+ICHINOSE Shogo <shogo82148 at gmail.com>
+INADA Naoki <songofacandy at gmail.com>
+Jacek Szwec <szwec.jacek at gmail.com>
+James Harr <james.harr at gmail.com>
+Jeff Hodges <jeff at somethingsimilar.com>
+Jeffrey Charles <jeffreycharles at gmail.com>
+Jian Zhen <zhenjl at gmail.com>
+Joshua Prunier <joshua.prunier at gmail.com>
+Julien Lefevre <julien.lefevr at gmail.com>
+Julien Schmidt <go-sql-driver at julienschmidt.com>
+Justin Li <jli at j-li.net>
+Justin Nuß <nuss.justin at gmail.com>
+Kamil Dziedzic <kamil at klecza.pl>
+Kevin Malachowski <kevin at chowski.com>
+Kieron Woodhouse <kieron.woodhouse at infosum.com>
+Lennart Rudolph <lrudolph at hmc.edu>
+Leonardo YongUk Kim <dalinaum at gmail.com>
+Linh Tran Tuan <linhduonggnu at gmail.com>
+Lion Yang <lion at aosc.xyz>
+Luca Looz <luca.looz92 at gmail.com>
+Lucas Liu <extrafliu at gmail.com>
+Luke Scott <luke at webconnex.com>
+Maciej Zimnoch <maciej.zimnoch at codilime.com>
+Michael Woolnough <michael.woolnough at gmail.com>
+Nicola Peduzzi <thenikso at gmail.com>
+Olivier Mengué <dolmen at cpan.org>
+oscarzhao <oscarzhaosl at gmail.com>
+Paul Bonser <misterpib at gmail.com>
+Peter Schultz <peter.schultz at classmarkets.com>
+Rebecca Chin <rchin at pivotal.io>
+Reed Allman <rdallman10 at gmail.com>
+Richard Wilkes <wilkes at me.com>
+Robert Russell <robert at rrbrussell.com>
+Runrioter Wung <runrioter at gmail.com>
+Shuode Li <elemount at qq.com>
+Soroush Pour <me at soroushjp.com>
+Stan Putrya <root.vagner at gmail.com>
+Stanley Gunawan <gunawan.stanley at gmail.com>
+Thomas Wodarek <wodarekwebpage at gmail.com>
+Xiangyu Hu <xiangyu.hu at outlook.com>
+Xiaobing Jiang <s7v7nislands at gmail.com>
+Xiuming Chen <cc at cxm.cc>
+Zhenye Xie <xiezhenye at gmail.com>
+
+# Organizations
+
+Barracuda Networks, Inc.
+Counting Ltd.
+Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Percona LLC
+Pivotal Inc.
+Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644 (file)
index 0000000..2d87d74
--- /dev/null
@@ -0,0 +1,167 @@
+## Version 1.4 (2018-06-03)
+
+Changes:
+
+ - Documentation fixes (#530, #535, #567)
+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
+ - Cache column names (#444)
+ - Sort the DSN parameters in DSNs generated from a config (#637)
+ - Allow native password authentication by default (#644)
+ - Use the default port if it is missing in the DSN (#668)
+ - Removed the `strict` mode (#676)
+ - Do not query `max_allowed_packet` by default (#680)
+ - Dropped support Go 1.6 and lower (#696)
+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
+ - Improved the compatibility of the authentication system (#807)
+
+New Features:
+
+ - Multi-Results support (#537)
+ - `rejectReadOnly` DSN option (#604)
+ - `context.Context` support (#608, #612, #627, #761)
+ - Transaction isolation level support (#619, #744)
+ - Read-Only transactions support (#618, #634)
+ - `NewConfig` function which initializes a config with default values (#679)
+ - Implemented the `ColumnType` interfaces (#667, #724)
+ - Support for custom string types in `ConvertValue` (#623)
+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
+ - Implemented `driver.SessionResetter` (#779)
+ - `sha256_password` authentication plugin support (#808)
+
+Bugfixes:
+
+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
+ - Removed columns definition cache since it sometimes cached invalid data (#592)
+ - Don't mutate registered TLS configs (#600)
+ - Make RegisterTLSConfig concurrency-safe (#613)
+ - Handle missing auth data in the handshake packet correctly (#646)
+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
+ - Cache the connection pointer for error handling before invalidating it (#678)
+ - Fixed imports for appengine/cloudsql (#700)
+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
+ - Set correct capacity for []bytes read from length-encoded strings (#766)
+ - Make RegisterDial concurrency-safe (#773)
+
+
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
 - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+  - Go-MySQL-Driver now requires Go 1.1
+  - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+  - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+  - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+  - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+  - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+  - Optimized the buffer for reading
+  - stmt.Query now caches column metadata
+  - New Logo
+  - Changed the copyright header to include all contributors
+  - Improved the LOAD INFILE documentation
+  - The driver struct is now exported to make the driver directly accessible
+  - Refactored the driver tests
+  - Added more benchmarks and moved all to a separate file
+  - Other small refactoring
+
+New Features:
+
+  - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+  - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+  - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+  - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+  - Convert to DB timezone when inserting `time.Time`
+  - Splitted packets (more than 16MB) are now merged correctly
+  - Fixed false positive `io.EOF` errors when the data was fully read
+  - Avoid panics on reuse of closed connections
+  - Fixed empty string producing false nil values
+  - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..8fe16bc
--- /dev/null
@@ -0,0 +1,23 @@
+# Contributing Guidelines
+
+## Reporting Issues
+
+Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
+
+## Contributing Code
+
+By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
+Don't forget to add yourself to the AUTHORS file.
+
+### Code Review
+
+Everyone is invited to review and comment on pull requests.
+If it looks fine to you, comment with "LGTM" (Looks good to me).
+
+If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
+
+Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
+
+## Development Ideas
+
+If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644 (file)
index 0000000..14e2f77
--- /dev/null
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in 
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11. "Patent Claims" of a Contributor
+    means any patent claim(s), including without limitation, method,
+    process, and apparatus claims, in any patent Licensable by such
+    Contributor that would be infringed, but for the grant of the
+    License, by the making, using, selling, offering for sale, having
+    made, import, or transfer of either its Contributions or its
+    Contributor Version.
+
+1.12. "Secondary License"
+    means either the GNU General Public License, Version 2.0, the GNU
+    Lesser General Public License, Version 2.1, the GNU Affero General
+    Public License, Version 3.0, or any later versions of those
+    licenses.
+
+1.13. "Source Code Form"
+    means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+    means an individual or a legal entity exercising rights under this
+    License. For legal entities, "You" includes any entity that
+    controls, is controlled by, or is under common control with You. For
+    purposes of this definition, "control" means (a) the power, direct
+    or indirect, to cause the direction or management of such entity,
+    whether by contract or otherwise, or (b) ownership of more than
+    fifty percent (50%) of the outstanding shares or beneficial
+    ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+    Licensable by such Contributor to use, reproduce, make available,
+    modify, display, perform, distribute, and otherwise exploit its
+    Contributions, either on an unmodified basis, with Modifications, or
+    as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+    for sale, have made, import, and otherwise transfer either its
+    Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+    or
+
+(b) for infringements caused by: (i) Your and any other third party's
+    modifications of Covered Software, or (ii) the combination of its
+    Contributions with other software (except as part of its Contributor
+    Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+    its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+    Form, as described in Section 3.1, and You must inform recipients of
+    the Executable Form how they can obtain a copy of such Source Code
+    Form by reasonable means in a timely manner, at a charge no more
+    than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+    License, or sublicense it under different terms, provided that the
+    license for the Executable Form does not attempt to limit or alter
+    the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+*                                                                      *
+*  6. Disclaimer of Warranty                                           *
+*  -------------------------                                           *
+*                                                                      *
+*  Covered Software is provided under this License on an "as is"       *
+*  basis, without warranty of any kind, either expressed, implied, or  *
+*  statutory, including, without limitation, warranties that the       *
+*  Covered Software is free of defects, merchantable, fit for a        *
+*  particular purpose or non-infringing. The entire risk as to the     *
+*  quality and performance of the Covered Software is with You.        *
+*  Should any Covered Software prove defective in any respect, You     *
+*  (not any Contributor) assume the cost of any necessary servicing,   *
+*  repair, or correction. This disclaimer of warranty constitutes an   *
+*  essential part of this License. No use of any Covered Software is   *
+*  authorized under this License except under this disclaimer.         *
+*                                                                      *
+************************************************************************
+
+************************************************************************
+*                                                                      *
+*  7. Limitation of Liability                                          *
+*  --------------------------                                          *
+*                                                                      *
+*  Under no circumstances and under no legal theory, whether tort      *
+*  (including negligence), contract, or otherwise, shall any           *
+*  Contributor, or anyone who distributes Covered Software as          *
+*  permitted above, be liable to You for any direct, indirect,         *
+*  special, incidental, or consequential damages of any character      *
+*  including, without limitation, damages for lost profits, loss of    *
+*  goodwill, work stoppage, computer failure or malfunction, or any    *
+*  and all other commercial damages or losses, even if such party      *
+*  shall have been informed of the possibility of such damages. This   *
+*  limitation of liability shall not apply to liability for death or   *
+*  personal injury resulting from such party's negligence to the       *
+*  extent applicable law prohibits such limitation. Some               *
+*  jurisdictions do not allow the exclusion or limitation of           *
+*  incidental or consequential damages, so this exclusion and          *
+*  limitation may not apply to You.                                    *
+*                                                                      *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644 (file)
index 0000000..7e7df1a
--- /dev/null
@@ -0,0 +1,490 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
+
+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
+
+---------------------------------------
+  * [Features](#features)
+  * [Requirements](#requirements)
+  * [Installation](#installation)
+  * [Usage](#usage)
+    * [DSN (Data Source Name)](#dsn-data-source-name)
+      * [Password](#password)
+      * [Protocol](#protocol)
+      * [Address](#address)
+      * [Parameters](#parameters)
+      * [Examples](#examples)
+    * [Connection pool and timeouts](#connection-pool-and-timeouts)
+    * [context.Context Support](#contextcontext-support)
+    * [ColumnType Support](#columntype-support)
+    * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+    * [time.Time support](#timetime-support)
+    * [Unicode support](#unicode-support)
+  * [Testing / Development](#testing--development)
+  * [License](#license)
+
+---------------------------------------
+
+## Features
+  * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+  * Native Go implementation. No C-bindings, just pure Go
+  * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+  * Automatic handling of broken connections
+  * Automatic Connection Pooling *(by database/sql package)*
+  * Supports queries larger than 16MB
+  * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
+  * Intelligent `LONG DATA` handling in prepared statements
+  * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
+  * Optional `time.Time` parsing
+  * Optional placeholder interpolation
+
+## Requirements
+  * Go 1.8 or higher. We aim to support the 3 latest versions of Go.
+  * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+```go
+import "database/sql"
+import _ "github.com/go-sql-driver/mysql"
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowNativePasswords`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        true
+```
+`allowNativePasswords=false` disallows the usage of MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type:           string
+Valid Values:   <name>
+Default:        none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `collation`
+
+```
+Type:           string
+Valid Values:   <name>
+Default:        utf8_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
+
+##### `clientFoundRows`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
+
+*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type:           string
+Valid Values:   <escaped name>
+Default:        UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type:          decimal number
+Default:       4194304
+```
+
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
+The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`.
+
+
+##### `readTimeout`
+
+```
+Type:           duration
+Default:        0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `serverPubKey`
+
+```
+Type:           string
+Valid Values:   <name>
+Default:        none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
+
+
+##### `timeout`
+
+```
+Type:           duration
+Default:        OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type:           bool / string
+Valid Values:   true, false, skip-verify, <name>
+Default:        false
+```
+
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+
+
+##### `writeTimeout`
+
+```
+Type:           duration
+Default:        0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+  * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+  * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+  * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+  * `autocommit=1`: `SET autocommit=1`
+  * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+  * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine (First Generation MySQL Server):
+```
+user@cloudsql(project-id:instance-name)/dbname
+```
+
+Google Cloud SQL on App Engine (Second Generation MySQL Server):
+```
+user@cloudsql(project-id:regionname:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+### `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+### `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
+
+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
+
+
+### Unicode support
+Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+
+See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+  * You can **use** the **unchanged** source code both in private and commercially.
+  * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+  * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
+
diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go
new file mode 100644 (file)
index 0000000..be41f2e
--- /dev/null
@@ -0,0 +1,19 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build appengine
+
+package mysql
+
+import (
+       "google.golang.org/appengine/cloudsql"
+)
+
+// init registers the App Engine "cloudsql" dial function so DSNs of the
+// form user@cloudsql(instance)/dbname work when built with the appengine tag.
+func init() {
+       RegisterDial("cloudsql", cloudsql.Dial)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644 (file)
index 0000000..2f61ecd
--- /dev/null
@@ -0,0 +1,420 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "crypto/rand"
+       "crypto/rsa"
+       "crypto/sha1"
+       "crypto/sha256"
+       "crypto/x509"
+       "encoding/pem"
+       "sync"
+)
+
+// server pub keys registry
+var (
+       serverPubKeyLock     sync.RWMutex
+       serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used adding serverPubKey=<name> to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+//  data, err := ioutil.ReadFile("mykey.pem")
+//  if err != nil {
+//     log.Fatal(err)
+//  }
+//
+//  block, _ := pem.Decode(data)
+//  if block == nil || block.Type != "PUBLIC KEY" {
+//     log.Fatal("failed to decode PEM block containing public key")
+//  }
+//
+//  pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+//  if err != nil {
+//     log.Fatal(err)
+//  }
+//
+//  if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+//     mysql.RegisterServerPubKey("mykey", rsaPubKey)
+//  } else {
+//     log.Fatal("not a RSA public key")
+//  }
+//
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+       serverPubKeyLock.Lock()
+       // Lazily allocate the registry on first registration.
+       if serverPubKeyRegistry == nil {
+               serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+       }
+
+       serverPubKeyRegistry[name] = pubKey
+       serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+// It is a no-op if the name was never registered.
+func DeregisterServerPubKey(name string) {
+       serverPubKeyLock.Lock()
+       // A nil registry means nothing was ever registered.
+       if serverPubKeyRegistry != nil {
+               delete(serverPubKeyRegistry, name)
+       }
+       serverPubKeyLock.Unlock()
+}
+
+// getServerPubKey returns the key registered under name, or nil if none is
+// registered. Read access is guarded by the package-level RWMutex.
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+       serverPubKeyLock.RLock()
+       if v, ok := serverPubKeyRegistry[name]; ok {
+               pubKey = v
+       }
+       serverPubKeyLock.RUnlock()
+       return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+//
+// myRnd holds the two-word state of MySQL's legacy pseudo random number
+// generator used by the old password scramble.
+type myRnd struct {
+       seed1, seed2 uint32
+}
+
+// myRndMaxVal is the generator's modulus, (1<<30)-1.
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+//
+// newMyRnd seeds the legacy generator; both seeds are reduced modulo
+// myRndMaxVal to mirror the server-side implementation.
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+       return &myRnd{
+               seed1: seed1 % myRndMaxVal,
+               seed2: seed2 % myRndMaxVal,
+       }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+//
+// NextByte advances the generator and returns the next pseudo random value
+// scaled into a byte (range 0..30, since seed1 < myRndMaxVal).
+func (r *myRnd) NextByte() byte {
+       r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+       r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+       return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+//
+// pwHash implements MySQL's original two-word rolling hash of a password.
+// Spaces and tabs in the password are skipped, matching server behavior.
+func pwHash(password []byte) (result [2]uint32) {
+       var add uint32 = 7
+       var tmp uint32
+
+       result[0] = 1345345333
+       result[1] = 0x12345671
+
+       for _, c := range password {
+               // skip spaces and tabs in password
+               if c == ' ' || c == '\t' {
+                       continue
+               }
+
+               tmp = uint32(c)
+               result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+               result[1] += (result[1] << 8) ^ result[0]
+               add += tmp
+       }
+
+       // Remove sign bit: mask with (1<<31)-1 so each word fits MySQL's
+       // signed 32-bit representation.
+       result[0] &= 0x7FFFFFFF
+       result[1] &= 0x7FFFFFFF
+
+       return
+}
+
+// Hash password using insecure pre 4.1 method
+//
+// scrambleOldPassword produces the 8-byte response for the legacy
+// mysql_old_password handshake. An empty password yields nil (empty reply).
+func scrambleOldPassword(scramble []byte, password string) []byte {
+       if len(password) == 0 {
+               return nil
+       }
+
+       // Only the first 8 bytes of the server challenge are used.
+       scramble = scramble[:8]
+
+       hashPw := pwHash([]byte(password))
+       hashSc := pwHash(scramble)
+
+       // Seed the legacy PRNG with the XOR of password and challenge hashes.
+       r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+       var out [8]byte
+       for i := range out {
+               out[i] = r.NextByte() + 64
+       }
+
+       // Obfuscate the result with one final PRNG byte.
+       mask := r.NextByte()
+       for i := range out {
+               out[i] ^= mask
+       }
+
+       return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+//
+// scramblePassword computes SHA1(password) XOR SHA1(scramble + SHA1(SHA1(password)))
+// as required by the mysql_native_password plugin. An empty password yields
+// nil (empty auth response). The caller's scramble slice is not modified:
+// the local variable is rebound to crypt.Sum's fresh output before the XOR.
+func scramblePassword(scramble []byte, password string) []byte {
+       if len(password) == 0 {
+               return nil
+       }
+
+       // stage1Hash = SHA1(password)
+       crypt := sha1.New()
+       crypt.Write([]byte(password))
+       stage1 := crypt.Sum(nil)
+
+       // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+       // inner Hash
+       crypt.Reset()
+       crypt.Write(stage1)
+       hash := crypt.Sum(nil)
+
+       // outer Hash
+       crypt.Reset()
+       crypt.Write(scramble)
+       crypt.Write(hash)
+       scramble = crypt.Sum(nil)
+
+       // token = scrambleHash XOR stage1Hash
+       for i := range scramble {
+               scramble[i] ^= stage1[i]
+       }
+       return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+//
+// scrambleSHA256Password computes the caching_sha2_password fast-auth
+// response. An empty password yields nil (empty auth response).
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+       if len(password) == 0 {
+               return nil
+       }
+
+       // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+       crypt := sha256.New()
+       crypt.Write([]byte(password))
+       message1 := crypt.Sum(nil)
+
+       crypt.Reset()
+       crypt.Write(message1)
+       message1Hash := crypt.Sum(nil)
+
+       crypt.Reset()
+       crypt.Write(message1Hash)
+       crypt.Write(scramble)
+       message2 := crypt.Sum(nil)
+
+       // XOR the two digests in place; message1 becomes the response.
+       for i := range message1 {
+               message1[i] ^= message2[i]
+       }
+
+       return message1
+}
+
+// encryptPassword NUL-terminates the password, XORs it with the repeating
+// seed (the auth challenge), and encrypts the result with RSA-OAEP/SHA-1
+// using the server's public key. Used by sha256_password and
+// caching_sha2_password full authentication over insecure channels.
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+       plain := make([]byte, len(password)+1)
+       copy(plain, password)
+       // XOR the NUL-terminated password with the seed, repeating the seed.
+       for i := range plain {
+               j := i % len(seed)
+               plain[i] ^= seed[j]
+       }
+       sha1 := sha1.New()
+       return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+// sendEncryptedPassword RSA-encrypts the connection's password with the
+// given seed and server public key, then sends it as an auth switch response.
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+       enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+       if err != nil {
+               return err
+       }
+       return mc.writeAuthSwitchPacket(enc, false)
+}
+
+// auth computes the authentication response for the given plugin using the
+// server-provided challenge (authData). It returns the response payload, a
+// flag indicating whether a terminating NUL must be appended when the packet
+// is written, and an error when the plugin is disallowed by the connection's
+// configuration or unknown.
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, bool, error) {
+       switch plugin {
+       case "caching_sha2_password":
+               authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+               return authResp, false, nil
+
+       case "mysql_old_password":
+               if !mc.cfg.AllowOldPasswords {
+                       return nil, false, ErrOldPassword
+               }
+               // Note: there are edge cases where this should work but doesn't;
+               // this is currently "wontfix":
+               // https://github.com/go-sql-driver/mysql/issues/184
+               authResp := scrambleOldPassword(authData[:8], mc.cfg.Passwd)
+               return authResp, true, nil
+
+       case "mysql_clear_password":
+               if !mc.cfg.AllowCleartextPasswords {
+                       return nil, false, ErrCleartextPassword
+               }
+               // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+               // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+               return []byte(mc.cfg.Passwd), true, nil
+
+       case "mysql_native_password":
+               if !mc.cfg.AllowNativePasswords {
+                       return nil, false, ErrNativePassword
+               }
+               // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+               // Native password authentication only needs (and will ever need)
+               // a 20-byte challenge.
+               authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+               return authResp, false, nil
+
+       case "sha256_password":
+               if len(mc.cfg.Passwd) == 0 {
+                       return nil, true, nil
+               }
+               if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+                       // write cleartext auth packet
+                       return []byte(mc.cfg.Passwd), true, nil
+               }
+
+               pubKey := mc.cfg.pubKey
+               if pubKey == nil {
+                       // request public key from server
+                       return []byte{1}, false, nil
+               }
+
+               // encrypted password
+               enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+               return enc, false, err
+
+       default:
+               errLog.Print("unknown auth plugin:", plugin)
+               return nil, false, ErrUnknownPlugin
+       }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+       // Read Result Packet
+       authData, newPlugin, err := mc.readAuthResult()
+       if err != nil {
+               return err
+       }
+
+       // handle auth plugin switch, if requested
+       if newPlugin != "" {
+               // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+               // sent and we have to keep using the cipher sent in the init packet.
+               if authData == nil {
+                       authData = oldAuthData
+               } else {
+                       // copy data from read buffer to owned slice
+                       copy(oldAuthData, authData)
+               }
+
+               plugin = newPlugin
+
+               authResp, addNUL, err := mc.auth(authData, plugin)
+               if err != nil {
+                       return err
+               }
+               if err = mc.writeAuthSwitchPacket(authResp, addNUL); err != nil {
+                       return err
+               }
+
+               // Read Result Packet
+               authData, newPlugin, err = mc.readAuthResult()
+               if err != nil {
+                       return err
+               }
+
+               // Do not allow to change the auth plugin more than once
+               if newPlugin != "" {
+                       return ErrMalformPkt
+               }
+       }
+
+       switch plugin {
+
+       // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+       case "caching_sha2_password":
+               switch len(authData) {
+               case 0:
+                       return nil // auth successful
+               case 1:
+                       switch authData[0] {
+                       case cachingSha2PasswordFastAuthSuccess:
+                               if err = mc.readResultOK(); err == nil {
+                                       return nil // auth successful
+                               }
+
+                       case cachingSha2PasswordPerformFullAuthentication:
+                               if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+                                       // write cleartext auth packet
+                                       err = mc.writeAuthSwitchPacket([]byte(mc.cfg.Passwd), true)
+                                       if err != nil {
+                                               return err
+                                       }
+                               } else {
+                                       pubKey := mc.cfg.pubKey
+                                       if pubKey == nil {
+                                               // request public key from server
+                                               data := mc.buf.takeSmallBuffer(4 + 1)
+                                               data[4] = cachingSha2PasswordRequestPublicKey
+                                               mc.writePacket(data)
+
+                                               // parse public key
+                                               data, err := mc.readPacket()
+                                               if err != nil {
+                                                       return err
+                                               }
+
+                                               block, _ := pem.Decode(data[1:])
+                                               pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+                                               if err != nil {
+                                                       return err
+                                               }
+                                               pubKey = pkix.(*rsa.PublicKey)
+                                       }
+
+                                       // send encrypted password
+                                       err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+                                       if err != nil {
+                                               return err
+                                       }
+                               }
+                               return mc.readResultOK()
+
+                       default:
+                               return ErrMalformPkt
+                       }
+               default:
+                       return ErrMalformPkt
+               }
+
+       case "sha256_password":
+               switch len(authData) {
+               case 0:
+                       return nil // auth successful
+               default:
+                       block, _ := pem.Decode(authData)
+                       pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+                       if err != nil {
+                               return err
+                       }
+
+                       // send encrypted password
+                       err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+                       if err != nil {
+                               return err
+                       }
+                       return mc.readResultOK()
+               }
+
+       default:
+               return nil // auth successful
+       }
+
+       return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth_test.go b/vendor/github.com/go-sql-driver/mysql/auth_test.go
new file mode 100644 (file)
index 0000000..bd0e218
--- /dev/null
@@ -0,0 +1,1260 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "crypto/rsa"
+       "crypto/tls"
+       "crypto/x509"
+       "encoding/pem"
+       "fmt"
+       "testing"
+)
+
+var testPubKey = []byte("-----BEGIN PUBLIC KEY-----\n" +
+       "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAol0Z8G8U+25Btxk/g/fm\n" +
+       "UAW/wEKjQCTjkibDE4B+qkuWeiumg6miIRhtilU6m9BFmLQSy1ltYQuu4k17A4tQ\n" +
+       "rIPpOQYZges/qsDFkZh3wyK5jL5WEFVdOasf6wsfszExnPmcZS4axxoYJfiuilrN\n" +
+       "hnwinBAqfi3S0sw5MpSI4Zl1AbOrHG4zDI62Gti2PKiMGyYDZTS9xPrBLbN95Kby\n" +
+       "FFclQLEzA9RJcS1nHFsWtRgHjGPhhjCQxEm9NQ1nePFhCfBfApyfH1VM2VCOQum6\n" +
+       "Ci9bMuHWjTjckC84mzF99kOxOWVU7mwS6gnJqBzpuz8t3zq8/iQ2y7QrmZV+jTJP\n" +
+       "WQIDAQAB\n" +
+       "-----END PUBLIC KEY-----\n")
+
+var testPubKeyRSA *rsa.PublicKey
+
+func init() {
+       block, _ := pem.Decode(testPubKey)
+       pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+       if err != nil {
+               panic(err)
+       }
+       testPubKeyRSA = pub.(*rsa.PublicKey)
+}
+
+func TestScrambleOldPass(t *testing.T) {
+       scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
+       vectors := []struct {
+               pass string
+               out  string
+       }{
+               {" pass", "47575c5a435b4251"},
+               {"pass ", "47575c5a435b4251"},
+               {"123\t456", "575c47505b5b5559"},
+               {"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
+       }
+       for _, tuple := range vectors {
+               ours := scrambleOldPassword(scramble, tuple.pass)
+               if tuple.out != fmt.Sprintf("%x", ours) {
+                       t.Errorf("Failed old password %q", tuple.pass)
+               }
+       }
+}
+
+func TestScrambleSHA256Pass(t *testing.T) {
+       scramble := []byte{10, 47, 74, 111, 75, 73, 34, 48, 88, 76, 114, 74, 37, 13, 3, 80, 82, 2, 23, 21}
+       vectors := []struct {
+               pass string
+               out  string
+       }{
+               {"secret", "f490e76f66d9d86665ce54d98c78d0acfe2fb0b08b423da807144873d30b312c"},
+               {"secret2", "abc3934a012cf342e876071c8ee202de51785b430258a7a0138bc79c4d800bc6"},
+       }
+       for _, tuple := range vectors {
+               ours := scrambleSHA256Password(scramble, tuple.pass)
+               if tuple.out != fmt.Sprintf("%x", ours) {
+                       t.Errorf("Failed SHA256 password %q", tuple.pass)
+               }
+       }
+}
+
+func TestAuthFastCachingSHA256PasswordCached(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+
+       authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
+               22, 41, 84, 32, 123, 43, 118}
+       plugin := "caching_sha2_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{102, 32, 5, 35, 143, 161, 140, 241, 171, 232, 56,
+               139, 43, 14, 107, 196, 249, 170, 147, 60, 220, 204, 120, 178, 214, 15,
+               184, 150, 26, 61, 57, 235}
+       if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               2, 0, 0, 2, 1, 3, // Fast Auth Success
+               7, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, // OK
+       }
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+}
+
+func TestAuthFastCachingSHA256PasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = ""
+
+       authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
+               22, 41, 84, 32, 123, 43, 118}
+       plugin := "caching_sha2_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       if writtenAuthRespLen != 0 {
+               t.Fatalf("unexpected written auth response (%d bytes): %v",
+                       writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
+       }
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+}
+
+func TestAuthFastCachingSHA256PasswordFullRSA(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+
+       authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
+               62, 94, 83, 80, 52, 85}
+       plugin := "caching_sha2_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
+               49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
+               110, 40, 139, 124, 41}
+       if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               2, 0, 0, 2, 1, 4, // Perform Full Authentication
+       }
+       conn.queuedReplies = [][]byte{
+               // pub key response
+               append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
+
+               // OK
+               {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 3
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       if !bytes.HasPrefix(conn.written, []byte{1, 0, 0, 3, 2, 0, 1, 0, 5}) {
+               t.Errorf("unexpected written data: %v", conn.written)
+       }
+}
+
+func TestAuthFastCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+       mc.cfg.pubKey = testPubKeyRSA
+
+       authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
+               62, 94, 83, 80, 52, 85}
+       plugin := "caching_sha2_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
+               49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
+               110, 40, 139, 124, 41}
+       if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               2, 0, 0, 2, 1, 4, // Perform Full Authentication
+       }
+       conn.queuedReplies = [][]byte{
+               // OK
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 2
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
+               t.Errorf("unexpected written data: %v", conn.written)
+       }
+}
+
+func TestAuthFastCachingSHA256PasswordFullSecure(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+
+       authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
+               62, 94, 83, 80, 52, 85}
+       plugin := "caching_sha2_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Hack to make the caching_sha2_password plugin believe that the connection
+       // is secure
+       mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
+               49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
+               110, 40, 139, 124, 41}
+       if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               2, 0, 0, 2, 1, 4, // Perform Full Authentication
+       }
+       conn.queuedReplies = [][]byte{
+               // OK
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 3
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       if !bytes.Equal(conn.written, []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}) {
+               t.Errorf("unexpected written data: %v", conn.written)
+       }
+}
+
+func TestAuthFastCleartextPasswordNotAllowed(t *testing.T) {
+       _, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+
+       authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
+               103, 26, 95, 81, 17, 24, 21}
+       plugin := "mysql_clear_password"
+
+       // Send Client Authentication Packet
+       _, _, err := mc.auth(authData, plugin)
+       if err != ErrCleartextPassword {
+               t.Errorf("expected ErrCleartextPassword, got %v", err)
+       }
+}
+
+func TestAuthFastCleartextPassword(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+       mc.cfg.AllowCleartextPasswords = true
+
+       authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
+               103, 26, 95, 81, 17, 24, 21}
+       plugin := "mysql_clear_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{115, 101, 99, 114, 101, 116}
+       if writtenAuthRespLen != 6 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
+       }
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+}
+
+func TestAuthFastCleartextPasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = ""
+       mc.cfg.AllowCleartextPasswords = true
+
+       authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
+               103, 26, 95, 81, 17, 24, 21}
+       plugin := "mysql_clear_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       if writtenAuthRespLen != 0 {
+               t.Fatalf("unexpected written auth response (%d bytes): %v",
+                       writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
+       }
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+}
+
+func TestAuthFastNativePasswordNotAllowed(t *testing.T) {
+       _, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+       mc.cfg.AllowNativePasswords = false
+
+       authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
+               103, 26, 95, 81, 17, 24, 21}
+       plugin := "mysql_native_password"
+
+       // Send Client Authentication Packet
+       _, _, err := mc.auth(authData, plugin)
+       if err != ErrNativePassword {
+               t.Errorf("expected ErrNativePassword, got %v", err)
+       }
+}
+
+func TestAuthFastNativePassword(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+
+       authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
+               103, 26, 95, 81, 17, 24, 21}
+       plugin := "mysql_native_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{53, 177, 140, 159, 251, 189, 127, 53, 109, 252,
+               172, 50, 211, 192, 240, 164, 26, 48, 207, 45}
+       if writtenAuthRespLen != 20 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
+       }
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+}
+
+func TestAuthFastNativePasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = ""
+
+       authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
+               103, 26, 95, 81, 17, 24, 21}
+       plugin := "mysql_native_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       if writtenAuthRespLen != 0 {
+               t.Fatalf("unexpected written auth response (%d bytes): %v",
+                       writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response
+       conn.data = []byte{
+               7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
+       }
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+}
+
+func TestAuthFastSHA256PasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = ""
+
+       authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
+               62, 94, 83, 80, 52, 85}
+       plugin := "sha256_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       if writtenAuthRespLen != 0 {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response (pub key response)
+       conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
+       conn.queuedReplies = [][]byte{
+               // OK
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 2
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
+               t.Errorf("unexpected written data: %v", conn.written)
+       }
+}
+
+func TestAuthFastSHA256PasswordRSA(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+
+       authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
+               62, 94, 83, 80, 52, 85}
+       plugin := "sha256_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp)
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{1}
+       if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response (pub key response)
+       conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
+       conn.queuedReplies = [][]byte{
+               // OK
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 2
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
+               t.Errorf("unexpected written data: %v", conn.written)
+       }
+}
+
+func TestAuthFastSHA256PasswordRSAWithKey(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+       mc.cfg.pubKey = testPubKeyRSA
+
+       authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
+               62, 94, 83, 80, 52, 85}
+       plugin := "sha256_password"
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // auth response (OK)
+       conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+}
+
+func TestAuthFastSHA256PasswordSecure(t *testing.T) {
+       conn, mc := newRWMockConn(1)
+       mc.cfg.User = "root"
+       mc.cfg.Passwd = "secret"
+
+       // hack to make the caching_sha2_password plugin believe that the connection
+       // is secure
+       mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
+
+       authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
+               62, 94, 83, 80, 52, 85}
+       plugin := "sha256_password"
+
+       // send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // unset TLS config to prevent the actual establishment of a TLS wrapper
+       mc.cfg.tls = nil
+
+       err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // check written auth response
+       authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+       authRespEnd := authRespStart + 1 + len(authResp) + 1
+       writtenAuthRespLen := conn.written[authRespStart]
+       writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+       expectedAuthResp := []byte{115, 101, 99, 114, 101, 116, 0}
+       if writtenAuthRespLen != 6 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+               t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
+       }
+       conn.written = nil
+
+       // auth response (OK)
+       conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
+       conn.maxReads = 1
+
+       // Handle response to auth packet
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       if !bytes.Equal(conn.written, []byte{}) {
+               t.Errorf("unexpected written data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchCachingSHA256PasswordCached covers the fast-auth path of
+// caching_sha2_password: the server has the credentials cached, so a single
+// scrambled-hash packet from the client is answered directly with OK.
+func TestAuthSwitchCachingSHA256PasswordCached(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = "secret"
+
+       // auth switch request
+       conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
+               115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
+               11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
+               50, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, // OK
+       }
+       conn.maxReads = 3
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{
+               // 1. Packet: Hash
+               32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
+               54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
+               153, 9, 130,
+       }
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchCachingSHA256PasswordEmpty checks that an empty password is
+// answered with an empty (zero-length payload) auth response packet.
+func TestAuthSwitchCachingSHA256PasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = ""
+
+       // auth switch request
+       conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
+               115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
+               11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
+               50, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
+       conn.maxReads = 2
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{0, 0, 0, 3}
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchCachingSHA256PasswordFullRSA covers full authentication over
+// an insecure channel: the client sends the hash, is told to do full auth,
+// requests the server's public key, then sends the RSA-encrypted password.
+// The encrypted packet is nondeterministic, so only a prefix is checked.
+func TestAuthSwitchCachingSHA256PasswordFullRSA(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = "secret"
+
+       // auth switch request
+       conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
+               115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
+               11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
+               50, 0}
+
+       conn.queuedReplies = [][]byte{
+               // Perform Full Authentication
+               {2, 0, 0, 4, 1, 4},
+
+               // Pub Key Response
+               append([]byte{byte(1 + len(testPubKey)), 1, 0, 6, 1}, testPubKey...),
+
+               // OK
+               {7, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 4
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReplyPrefix := []byte{
+               // 1. Packet: Hash
+               32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
+               54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
+               153, 9, 130,
+
+               // 2. Packet: Pub Key Request
+               1, 0, 0, 5, 2,
+
+               // 3. Packet: Encrypted Password
+               0, 1, 0, 7, // [changing bytes]
+       }
+       if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchCachingSHA256PasswordFullRSAWithKey is the same full-auth
+// flow, but with the server public key pre-configured (cfg.pubKey), so the
+// pub-key-request round trip is skipped.
+func TestAuthSwitchCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = "secret"
+       mc.cfg.pubKey = testPubKeyRSA
+
+       // auth switch request
+       conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
+               115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
+               11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
+               50, 0}
+
+       conn.queuedReplies = [][]byte{
+               // Perform Full Authentication
+               {2, 0, 0, 4, 1, 4},
+
+               // OK
+               {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 3
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReplyPrefix := []byte{
+               // 1. Packet: Hash
+               32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
+               54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
+               153, 9, 130,
+
+               // 2. Packet: Encrypted Password
+               0, 1, 0, 5, // [changing bytes]
+       }
+       if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchCachingSHA256PasswordFullSecure covers full authentication
+// over a (mock) TLS channel: after "perform full auth" the client may send
+// the cleartext password directly instead of RSA-encrypting it.
+func TestAuthSwitchCachingSHA256PasswordFullSecure(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = "secret"
+
+       // Hack to make the caching_sha2_password plugin believe that the connection
+       // is secure
+       mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
+
+       // auth switch request
+       conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
+               115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
+               11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
+               50, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{
+               {2, 0, 0, 4, 1, 4},                // Perform Full Authentication
+               {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0}, // OK
+       }
+       conn.maxReads = 3
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{
+               // 1. Packet: Hash
+               32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
+               54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
+               153, 9, 130,
+
+               // 2. Packet: Cleartext password
+               7, 0, 0, 5, 115, 101, 99, 114, 101, 116, 0,
+       }
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchCleartextPasswordNotAllowed asserts that a switch to
+// mysql_clear_password is rejected with ErrCleartextPassword unless the
+// user opted in via AllowCleartextPasswords.
+func TestAuthSwitchCleartextPasswordNotAllowed(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+
+       conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
+               101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
+       conn.maxReads = 1
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+       err := mc.handleAuthResult(authData, plugin)
+       if err != ErrCleartextPassword {
+               t.Errorf("expected ErrCleartextPassword, got %v", err)
+       }
+}
+
+// TestAuthSwitchCleartextPassword asserts that, when allowed, the password
+// is sent as a NUL-terminated cleartext string.
+func TestAuthSwitchCleartextPassword(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.AllowCleartextPasswords = true
+       mc.cfg.Passwd = "secret"
+
+       // auth switch request
+       conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
+               101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
+       conn.maxReads = 2
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchCleartextPasswordEmpty covers the empty-password case: a
+// single NUL byte is sent.
+func TestAuthSwitchCleartextPasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.AllowCleartextPasswords = true
+       mc.cfg.Passwd = ""
+
+       // auth switch request
+       conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
+               101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
+       conn.maxReads = 2
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{1, 0, 0, 3, 0}
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchNativePasswordNotAllowed asserts that a switch to
+// mysql_native_password fails with ErrNativePassword when the user set
+// AllowNativePasswords=false.
+func TestAuthSwitchNativePasswordNotAllowed(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.AllowNativePasswords = false
+
+       conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
+               116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
+               71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
+               31, 0}
+       conn.maxReads = 1
+       authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
+               48, 31, 89, 39, 55, 31}
+       plugin := "caching_sha2_password"
+       err := mc.handleAuthResult(authData, plugin)
+       if err != ErrNativePassword {
+               t.Errorf("expected ErrNativePassword, got %v", err)
+       }
+}
+
+// TestAuthSwitchNativePassword asserts that the 20-byte native-password
+// scramble is sent when the server switches to mysql_native_password.
+func TestAuthSwitchNativePassword(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.AllowNativePasswords = true
+       mc.cfg.Passwd = "secret"
+
+       // auth switch request
+       conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
+               116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
+               71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
+               31, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
+       conn.maxReads = 2
+
+       authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
+               48, 31, 89, 39, 55, 31}
+       plugin := "caching_sha2_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{20, 0, 0, 3, 202, 41, 195, 164, 34, 226, 49, 103,
+               21, 211, 167, 199, 227, 116, 8, 48, 57, 71, 149, 146}
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchNativePasswordEmpty covers the empty-password case: the
+// scramble payload is empty.
+func TestAuthSwitchNativePasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.AllowNativePasswords = true
+       mc.cfg.Passwd = ""
+
+       // auth switch request
+       conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
+               116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
+               71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
+               31, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
+       conn.maxReads = 2
+
+       authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
+               48, 31, 89, 39, 55, 31}
+       plugin := "caching_sha2_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{0, 0, 0, 3}
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchOldPasswordNotAllowed asserts that a switch to the legacy
+// mysql_old_password plugin is rejected with ErrOldPassword unless the user
+// opted in via AllowOldPasswords.
+func TestAuthSwitchOldPasswordNotAllowed(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+
+       conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
+               100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
+               49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
+       conn.maxReads = 1
+       authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
+               84, 96, 101, 92, 123, 121, 107}
+       plugin := "mysql_native_password"
+       err := mc.handleAuthResult(authData, plugin)
+       if err != ErrOldPassword {
+               t.Errorf("expected ErrOldPassword, got %v", err)
+       }
+}
+
+// TestAuthSwitchOldPassword asserts that the 8-byte old-password scramble
+// (plus NUL terminator) is sent when old passwords are allowed.
+func TestAuthSwitchOldPassword(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.AllowOldPasswords = true
+       mc.cfg.Passwd = "secret"
+
+       // auth switch request
+       conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
+               100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
+               49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
+       conn.maxReads = 2
+
+       authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
+               84, 96, 101, 92, 123, 121, 107}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{9, 0, 0, 3, 86, 83, 83, 79, 74, 78, 65, 66, 0}
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchOldPasswordEmpty covers the empty-password case: only the
+// NUL terminator is sent.
+func TestAuthSwitchOldPasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.AllowOldPasswords = true
+       mc.cfg.Passwd = ""
+
+       // auth switch request
+       conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
+               100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
+               49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
+
+       // auth response
+       conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
+       conn.maxReads = 2
+
+       authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
+               84, 96, 101, 92, 123, 121, 107}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReply := []byte{1, 0, 0, 3, 0}
+       if !bytes.Equal(conn.written, expectedReply) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchSHA256PasswordEmpty asserts that sha256_password with an
+// empty password replies with a single NUL byte and needs no encryption.
+func TestAuthSwitchSHA256PasswordEmpty(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = ""
+
+       // auth switch request
+       conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
+               115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
+               33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
+
+       conn.queuedReplies = [][]byte{
+               // OK
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 3
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReplyPrefix := []byte{
+               // 1. Packet: Empty Password
+               1, 0, 0, 3, 0,
+       }
+       if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchSHA256PasswordRSA covers sha256_password on an insecure
+// channel: the client requests the server public key, then sends the
+// RSA-encrypted password. Only the deterministic prefix is checked.
+func TestAuthSwitchSHA256PasswordRSA(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = "secret"
+
+       // auth switch request
+       conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
+               115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
+               33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
+
+       conn.queuedReplies = [][]byte{
+               // Pub Key Response
+               append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
+
+               // OK
+               {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 3
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReplyPrefix := []byte{
+               // 1. Packet: Pub Key Request
+               1, 0, 0, 3, 1,
+
+               // 2. Packet: Encrypted Password
+               0, 1, 0, 5, // [changing bytes]
+       }
+       if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchSHA256PasswordRSAWithKey is the sha256_password RSA flow
+// with the public key pre-configured, so no pub-key-request round trip.
+func TestAuthSwitchSHA256PasswordRSAWithKey(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = "secret"
+       mc.cfg.pubKey = testPubKeyRSA
+
+       // auth switch request
+       conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
+               115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
+               33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
+
+       conn.queuedReplies = [][]byte{
+               // OK
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 2
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReplyPrefix := []byte{
+               // 1. Packet: Encrypted Password
+               0, 1, 0, 3, // [changing bytes]
+       }
+       if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
+
+// TestAuthSwitchSHA256PasswordSecure asserts that sha256_password over a
+// (mock) TLS channel sends the password in cleartext.
+func TestAuthSwitchSHA256PasswordSecure(t *testing.T) {
+       conn, mc := newRWMockConn(2)
+       mc.cfg.Passwd = "secret"
+
+       // Hack to make the sha256_password plugin believe that the connection
+       // is secure
+       mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
+
+       // auth switch request
+       conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
+               115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
+               33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
+
+       conn.queuedReplies = [][]byte{
+               // OK
+               {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
+       }
+       conn.maxReads = 2
+
+       authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
+               47, 43, 9, 41, 112, 67, 110}
+       plugin := "mysql_native_password"
+
+       if err := mc.handleAuthResult(authData, plugin); err != nil {
+               t.Errorf("got error: %v", err)
+       }
+
+       expectedReplyPrefix := []byte{
+               // 1. Packet: Cleartext Password
+               7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0,
+       }
+       if !bytes.Equal(conn.written, expectedReplyPrefix) {
+               t.Errorf("got unexpected data: %v", conn.written)
+       }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
new file mode 100644 (file)
index 0000000..5828d40
--- /dev/null
@@ -0,0 +1,319 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "context"
+       "database/sql"
+       "database/sql/driver"
+       "fmt"
+       "math"
+       "runtime"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "testing"
+       "time"
+)
+
+// TB wraps testing.B so error-checking helper methods can be attached to it.
+type TB testing.B
+
+// check fails the benchmark immediately on a non-nil error.
+func (tb *TB) check(err error) {
+       if err != nil {
+               tb.Fatal(err)
+       }
+}
+
+// checkDB returns db after asserting that opening it succeeded.
+func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
+       tb.check(err)
+       return db
+}
+
+// checkRows returns rows after asserting that the query succeeded.
+func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
+       tb.check(err)
+       return rows
+}
+
+// checkStmt returns stmt after asserting that preparation succeeded.
+func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
+       tb.check(err)
+       return stmt
+}
+
+// initDB opens the benchmark database (package-level dsn) and runs the
+// given setup queries, failing the benchmark on any error.
+func initDB(b *testing.B, queries ...string) *sql.DB {
+       tb := (*TB)(b)
+       db := tb.checkDB(sql.Open("mysql", dsn))
+       for _, query := range queries {
+               if _, err := db.Exec(query); err != nil {
+                       b.Fatalf("error on %q: %v", query, err)
+               }
+       }
+       return db
+}
+
+// number of goroutines sharing one *sql.DB in the concurrent benchmarks
+const concurrencyLevel = 10
+
+// BenchmarkQuery measures prepared-statement query throughput with
+// concurrencyLevel goroutines sharing one statement; remain distributes
+// the b.N iterations across workers via an atomic countdown.
+func BenchmarkQuery(b *testing.B) {
+       tb := (*TB)(b)
+       b.StopTimer()
+       b.ReportAllocs()
+       db := initDB(b,
+               "DROP TABLE IF EXISTS foo",
+               "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
+               `INSERT INTO foo VALUES (1, "one")`,
+               `INSERT INTO foo VALUES (2, "two")`,
+       )
+       db.SetMaxIdleConns(concurrencyLevel)
+       defer db.Close()
+
+       stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
+       defer stmt.Close()
+
+       remain := int64(b.N)
+       var wg sync.WaitGroup
+       wg.Add(concurrencyLevel)
+       defer wg.Wait()
+       b.StartTimer()
+
+       for i := 0; i < concurrencyLevel; i++ {
+               go func() {
+                       for {
+                               if atomic.AddInt64(&remain, -1) < 0 {
+                                       wg.Done()
+                                       return
+                               }
+
+                               var got string
+                               tb.check(stmt.QueryRow(1).Scan(&got))
+                               if got != "one" {
+                                       b.Errorf("query = %q; want one", got)
+                                       wg.Done()
+                                       return
+                               }
+                       }
+               }()
+       }
+}
+
+// BenchmarkExec measures "DO 1" execution throughput over one prepared
+// statement shared by concurrencyLevel goroutines; remain distributes the
+// b.N iterations across workers via an atomic countdown.
+func BenchmarkExec(b *testing.B) {
+       tb := (*TB)(b)
+       b.StopTimer()
+       b.ReportAllocs()
+       db := tb.checkDB(sql.Open("mysql", dsn))
+       db.SetMaxIdleConns(concurrencyLevel)
+       defer db.Close()
+
+       stmt := tb.checkStmt(db.Prepare("DO 1"))
+       defer stmt.Close()
+
+       remain := int64(b.N)
+       var wg sync.WaitGroup
+       wg.Add(concurrencyLevel)
+       defer wg.Wait()
+       b.StartTimer()
+
+       for i := 0; i < concurrencyLevel; i++ {
+               go func() {
+                       for {
+                               if atomic.AddInt64(&remain, -1) < 0 {
+                                       wg.Done()
+                                       return
+                               }
+
+                               if _, err := stmt.Exec(); err != nil {
+                                       // b.Fatal (FailNow) must not be called from a
+                                       // goroutine other than the one running the
+                                       // benchmark; report the error and stop this
+                                       // worker instead, as BenchmarkQuery does.
+                                       b.Error(err.Error())
+                                       wg.Done()
+                                       return
+                               }
+                       }
+               }()
+       }
+}
+
+// data, but no db writes
+var roundtripSample []byte
+
+// initRoundtripBenchmarks lazily builds a 16 MiB ASCII sample and returns
+// it together with the minimum (16) and maximum (full sample) payload
+// lengths used by the round-trip benchmarks.
+func initRoundtripBenchmarks() ([]byte, int, int) {
+       if roundtripSample == nil {
+               roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
+       }
+       return roundtripSample, 16, len(roundtripSample)
+}
+
+// BenchmarkRoundtripTxt round-trips growing string payloads through the
+// text protocol (unprepared query) and verifies each echo.
+func BenchmarkRoundtripTxt(b *testing.B) {
+       b.StopTimer()
+       sample, min, max := initRoundtripBenchmarks()
+       sampleString := string(sample)
+       b.ReportAllocs()
+       tb := (*TB)(b)
+       db := tb.checkDB(sql.Open("mysql", dsn))
+       defer db.Close()
+       b.StartTimer()
+       var result string
+       for i := 0; i < b.N; i++ {
+               // payload length grows with each iteration, capped at max
+               length := min + i
+               if length > max {
+                       length = max
+               }
+               test := sampleString[0:length]
+               rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
+               if !rows.Next() {
+                       rows.Close()
+                       b.Fatalf("crashed")
+               }
+               err := rows.Scan(&result)
+               if err != nil {
+                       rows.Close()
+                       b.Fatalf("crashed")
+               }
+               if result != test {
+                       rows.Close()
+                       b.Errorf("mismatch")
+               }
+               rows.Close()
+       }
+}
+
+// BenchmarkRoundtripBin round-trips growing []byte payloads through the
+// binary protocol (prepared statement) and verifies each echo.
+func BenchmarkRoundtripBin(b *testing.B) {
+       b.StopTimer()
+       sample, min, max := initRoundtripBenchmarks()
+       b.ReportAllocs()
+       tb := (*TB)(b)
+       db := tb.checkDB(sql.Open("mysql", dsn))
+       defer db.Close()
+       stmt := tb.checkStmt(db.Prepare("SELECT ?"))
+       defer stmt.Close()
+       b.StartTimer()
+       var result sql.RawBytes
+       for i := 0; i < b.N; i++ {
+               // payload length grows with each iteration, capped at max
+               length := min + i
+               if length > max {
+                       length = max
+               }
+               test := sample[0:length]
+               rows := tb.checkRows(stmt.Query(test))
+               if !rows.Next() {
+                       rows.Close()
+                       b.Fatalf("crashed")
+               }
+               err := rows.Scan(&result)
+               if err != nil {
+                       rows.Close()
+                       b.Fatalf("crashed")
+               }
+               if !bytes.Equal(result, test) {
+                       rows.Close()
+                       b.Errorf("mismatch")
+               }
+               rows.Close()
+       }
+}
+
+// BenchmarkInterpolation measures client-side parameter interpolation on a
+// hand-built mysqlConn (no network), covering ints, floats, bools, times,
+// and strings/bytes containing characters that need escaping.
+func BenchmarkInterpolation(b *testing.B) {
+       mc := &mysqlConn{
+               cfg: &Config{
+                       InterpolateParams: true,
+                       Loc:               time.UTC,
+               },
+               maxAllowedPacket: maxPacketSize,
+               maxWriteSize:     maxPacketSize - 1,
+               buf:              newBuffer(nil),
+       }
+
+       args := []driver.Value{
+               int64(42424242),
+               float64(math.Pi),
+               false,
+               time.Unix(1423411542, 807015000),
+               []byte("bytes containing special chars ' \" \a \x00"),
+               "string containing special chars ' \" \a \x00",
+       }
+       q := "SELECT ?, ?, ?, ?, ?, ?"
+
+       b.ReportAllocs()
+       b.ResetTimer()
+       for i := 0; i < b.N; i++ {
+               _, err := mc.interpolateParams(q, args)
+               if err != nil {
+                       b.Fatal(err)
+               }
+       }
+}
+
+// benchmarkQueryContext runs a prepared QueryContext benchmark with
+// parallelism p (p × GOMAXPROCS goroutines via RunParallel).
+func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+       db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
+
+       tb := (*TB)(b)
+       stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
+       defer stmt.Close()
+
+       b.SetParallelism(p)
+       b.ReportAllocs()
+       b.ResetTimer()
+       b.RunParallel(func(pb *testing.PB) {
+               var got string
+               for pb.Next() {
+                       tb.check(stmt.QueryRow(1).Scan(&got))
+                       if got != "one" {
+                               b.Fatalf("query = %q; want one", got)
+                       }
+               }
+       })
+}
+
+// BenchmarkQueryContext benchmarks QueryContext at parallelism 1-4.
+func BenchmarkQueryContext(b *testing.B) {
+       db := initDB(b,
+               "DROP TABLE IF EXISTS foo",
+               "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
+               `INSERT INTO foo VALUES (1, "one")`,
+               `INSERT INTO foo VALUES (2, "two")`,
+       )
+       defer db.Close()
+       for _, p := range []int{1, 2, 3, 4} {
+               b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
+                       benchmarkQueryContext(b, db, p)
+               })
+       }
+}
+
+// benchmarkExecContext runs a prepared ExecContext benchmark with
+// parallelism p (p × GOMAXPROCS goroutines via RunParallel).
+func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+       db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
+
+       tb := (*TB)(b)
+       stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
+       defer stmt.Close()
+
+       b.SetParallelism(p)
+       b.ReportAllocs()
+       b.ResetTimer()
+       b.RunParallel(func(pb *testing.PB) {
+               for pb.Next() {
+                       if _, err := stmt.ExecContext(ctx); err != nil {
+                               b.Fatal(err)
+                       }
+               }
+       })
+}
+
+func BenchmarkExecContext(b *testing.B) {
+       db := initDB(b,
+               "DROP TABLE IF EXISTS foo",
+               "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
+               `INSERT INTO foo VALUES (1, "one")`,
+               `INSERT INTO foo VALUES (2, "two")`,
+       )
+       defer db.Close()
+       for _, p := range []int{1, 2, 3, 4} {
+               b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
+                       benchmarkQueryContext(b, db, p)
+               })
+       }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644 (file)
index 0000000..eb4748b
--- /dev/null
@@ -0,0 +1,147 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "io"
+       "net"
+       "time"
+)
+
+// defaultBufSize is the initial size of the per-connection read/write buffer.
+const defaultBufSize = 4096
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
+// Also highly optimized for this particular use case.
+type buffer struct {
+       buf     []byte        // backing storage, grown on demand
+       nc      net.Conn      // underlying network connection
+       idx     int           // read position of the first unread byte in buf
+       length  int           // number of unread bytes buffered from idx onward
+       timeout time.Duration // per-read deadline applied in fill; 0 disables it
+}
+
+func newBuffer(nc net.Conn) buffer {
+       var b [defaultBufSize]byte
+       return buffer{
+               buf: b[:],
+               nc:  nc,
+       }
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it.
+// It compacts any unread data to the front, grows the buffer if needed,
+// and keeps reading from the connection (honoring b.timeout per read)
+// until the requirement is met or an error occurs.
+func (b *buffer) fill(need int) error {
+       n := b.length
+
+       // move existing data to the beginning
+       if n > 0 && b.idx > 0 {
+               copy(b.buf[0:n], b.buf[b.idx:])
+       }
+
+       // grow buffer if necessary
+       // TODO: let the buffer shrink again at some point
+       //       Maybe keep the org buf slice and swap back?
+       if need > len(b.buf) {
+               // Round up to the next multiple of the default size
+               newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+               copy(newBuf, b.buf)
+               b.buf = newBuf
+       }
+
+       b.idx = 0
+
+       for {
+               // Apply the read deadline before every Read so slow servers
+               // cannot stall the connection indefinitely.
+               if b.timeout > 0 {
+                       if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+                               return err
+                       }
+               }
+
+               nn, err := b.nc.Read(b.buf[n:])
+               n += nn
+
+               switch err {
+               case nil:
+                       if n < need {
+                               continue
+                       }
+                       b.length = n
+                       return nil
+
+               case io.EOF:
+                       // EOF is only acceptable once enough bytes arrived;
+                       // otherwise the server closed mid-packet.
+                       if n >= need {
+                               b.length = n
+                               return nil
+                       }
+                       return io.ErrUnexpectedEOF
+
+               default:
+                       return err
+               }
+       }
+}
+
+// returns next N bytes from buffer.
+// The returned slice is only guaranteed to be valid until the next read
+func (b *buffer) readNext(need int) ([]byte, error) {
+       if b.length < need {
+               // refill
+               if err := b.fill(need); err != nil {
+                       return nil, err
+               }
+       }
+
+       offset := b.idx
+       b.idx += need
+       b.length -= need
+       return b.buf[offset:b.idx], nil
+}
+
+// returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) []byte {
+       if b.length > 0 {
+               return nil
+       }
+
+       // test (cheap) general case first
+       if length <= defaultBufSize || length <= cap(b.buf) {
+               return b.buf[:length]
+       }
+
+       if length < maxPacketSize {
+               b.buf = make([]byte, length)
+               return b.buf
+       }
+       return make([]byte, length)
+}
+
+// shortcut which can be used if the requested buffer is guaranteed to be
+// smaller than defaultBufSize
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) []byte {
+       if b.length > 0 {
+               return nil
+       }
+       return b.buf[:length]
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() []byte {
+       if b.length > 0 {
+               return nil
+       }
+       return b.buf
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644 (file)
index 0000000..136c9e4
--- /dev/null
@@ -0,0 +1,251 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+// defaultCollation is used when the DSN does not specify one.
+const defaultCollation = "utf8_general_ci"
+
+// binaryCollation is used for raw binary data.
+const binaryCollation = "binary"
+
+// A list of available collations mapped to the internal ID.
+// IDs fit in a byte because only collations with ID < 256 are listed.
+// To update this map use the following MySQL query:
+//     SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
+var collations = map[string]byte{
+       "big5_chinese_ci":          1,
+       "latin2_czech_cs":          2,
+       "dec8_swedish_ci":          3,
+       "cp850_general_ci":         4,
+       "latin1_german1_ci":        5,
+       "hp8_english_ci":           6,
+       "koi8r_general_ci":         7,
+       "latin1_swedish_ci":        8,
+       "latin2_general_ci":        9,
+       "swe7_swedish_ci":          10,
+       "ascii_general_ci":         11,
+       "ujis_japanese_ci":         12,
+       "sjis_japanese_ci":         13,
+       "cp1251_bulgarian_ci":      14,
+       "latin1_danish_ci":         15,
+       "hebrew_general_ci":        16,
+       "tis620_thai_ci":           18,
+       "euckr_korean_ci":          19,
+       "latin7_estonian_cs":       20,
+       "latin2_hungarian_ci":      21,
+       "koi8u_general_ci":         22,
+       "cp1251_ukrainian_ci":      23,
+       "gb2312_chinese_ci":        24,
+       "greek_general_ci":         25,
+       "cp1250_general_ci":        26,
+       "latin2_croatian_ci":       27,
+       "gbk_chinese_ci":           28,
+       "cp1257_lithuanian_ci":     29,
+       "latin5_turkish_ci":        30,
+       "latin1_german2_ci":        31,
+       "armscii8_general_ci":      32,
+       "utf8_general_ci":          33,
+       "cp1250_czech_cs":          34,
+       "ucs2_general_ci":          35,
+       "cp866_general_ci":         36,
+       "keybcs2_general_ci":       37,
+       "macce_general_ci":         38,
+       "macroman_general_ci":      39,
+       "cp852_general_ci":         40,
+       "latin7_general_ci":        41,
+       "latin7_general_cs":        42,
+       "macce_bin":                43,
+       "cp1250_croatian_ci":       44,
+       "utf8mb4_general_ci":       45,
+       "utf8mb4_bin":              46,
+       "latin1_bin":               47,
+       "latin1_general_ci":        48,
+       "latin1_general_cs":        49,
+       "cp1251_bin":               50,
+       "cp1251_general_ci":        51,
+       "cp1251_general_cs":        52,
+       "macroman_bin":             53,
+       "utf16_general_ci":         54,
+       "utf16_bin":                55,
+       "utf16le_general_ci":       56,
+       "cp1256_general_ci":        57,
+       "cp1257_bin":               58,
+       "cp1257_general_ci":        59,
+       "utf32_general_ci":         60,
+       "utf32_bin":                61,
+       "utf16le_bin":              62,
+       "binary":                   63,
+       "armscii8_bin":             64,
+       "ascii_bin":                65,
+       "cp1250_bin":               66,
+       "cp1256_bin":               67,
+       "cp866_bin":                68,
+       "dec8_bin":                 69,
+       "greek_bin":                70,
+       "hebrew_bin":               71,
+       "hp8_bin":                  72,
+       "keybcs2_bin":              73,
+       "koi8r_bin":                74,
+       "koi8u_bin":                75,
+       "latin2_bin":               77,
+       "latin5_bin":               78,
+       "latin7_bin":               79,
+       "cp850_bin":                80,
+       "cp852_bin":                81,
+       "swe7_bin":                 82,
+       "utf8_bin":                 83,
+       "big5_bin":                 84,
+       "euckr_bin":                85,
+       "gb2312_bin":               86,
+       "gbk_bin":                  87,
+       "sjis_bin":                 88,
+       "tis620_bin":               89,
+       "ucs2_bin":                 90,
+       "ujis_bin":                 91,
+       "geostd8_general_ci":       92,
+       "geostd8_bin":              93,
+       "latin1_spanish_ci":        94,
+       "cp932_japanese_ci":        95,
+       "cp932_bin":                96,
+       "eucjpms_japanese_ci":      97,
+       "eucjpms_bin":              98,
+       "cp1250_polish_ci":         99,
+       "utf16_unicode_ci":         101,
+       "utf16_icelandic_ci":       102,
+       "utf16_latvian_ci":         103,
+       "utf16_romanian_ci":        104,
+       "utf16_slovenian_ci":       105,
+       "utf16_polish_ci":          106,
+       "utf16_estonian_ci":        107,
+       "utf16_spanish_ci":         108,
+       "utf16_swedish_ci":         109,
+       "utf16_turkish_ci":         110,
+       "utf16_czech_ci":           111,
+       "utf16_danish_ci":          112,
+       "utf16_lithuanian_ci":      113,
+       "utf16_slovak_ci":          114,
+       "utf16_spanish2_ci":        115,
+       "utf16_roman_ci":           116,
+       "utf16_persian_ci":         117,
+       "utf16_esperanto_ci":       118,
+       "utf16_hungarian_ci":       119,
+       "utf16_sinhala_ci":         120,
+       "utf16_german2_ci":         121,
+       "utf16_croatian_ci":        122,
+       "utf16_unicode_520_ci":     123,
+       "utf16_vietnamese_ci":      124,
+       "ucs2_unicode_ci":          128,
+       "ucs2_icelandic_ci":        129,
+       "ucs2_latvian_ci":          130,
+       "ucs2_romanian_ci":         131,
+       "ucs2_slovenian_ci":        132,
+       "ucs2_polish_ci":           133,
+       "ucs2_estonian_ci":         134,
+       "ucs2_spanish_ci":          135,
+       "ucs2_swedish_ci":          136,
+       "ucs2_turkish_ci":          137,
+       "ucs2_czech_ci":            138,
+       "ucs2_danish_ci":           139,
+       "ucs2_lithuanian_ci":       140,
+       "ucs2_slovak_ci":           141,
+       "ucs2_spanish2_ci":         142,
+       "ucs2_roman_ci":            143,
+       "ucs2_persian_ci":          144,
+       "ucs2_esperanto_ci":        145,
+       "ucs2_hungarian_ci":        146,
+       "ucs2_sinhala_ci":          147,
+       "ucs2_german2_ci":          148,
+       "ucs2_croatian_ci":         149,
+       "ucs2_unicode_520_ci":      150,
+       "ucs2_vietnamese_ci":       151,
+       "ucs2_general_mysql500_ci": 159,
+       "utf32_unicode_ci":         160,
+       "utf32_icelandic_ci":       161,
+       "utf32_latvian_ci":         162,
+       "utf32_romanian_ci":        163,
+       "utf32_slovenian_ci":       164,
+       "utf32_polish_ci":          165,
+       "utf32_estonian_ci":        166,
+       "utf32_spanish_ci":         167,
+       "utf32_swedish_ci":         168,
+       "utf32_turkish_ci":         169,
+       "utf32_czech_ci":           170,
+       "utf32_danish_ci":          171,
+       "utf32_lithuanian_ci":      172,
+       "utf32_slovak_ci":          173,
+       "utf32_spanish2_ci":        174,
+       "utf32_roman_ci":           175,
+       "utf32_persian_ci":         176,
+       "utf32_esperanto_ci":       177,
+       "utf32_hungarian_ci":       178,
+       "utf32_sinhala_ci":         179,
+       "utf32_german2_ci":         180,
+       "utf32_croatian_ci":        181,
+       "utf32_unicode_520_ci":     182,
+       "utf32_vietnamese_ci":      183,
+       "utf8_unicode_ci":          192,
+       "utf8_icelandic_ci":        193,
+       "utf8_latvian_ci":          194,
+       "utf8_romanian_ci":         195,
+       "utf8_slovenian_ci":        196,
+       "utf8_polish_ci":           197,
+       "utf8_estonian_ci":         198,
+       "utf8_spanish_ci":          199,
+       "utf8_swedish_ci":          200,
+       "utf8_turkish_ci":          201,
+       "utf8_czech_ci":            202,
+       "utf8_danish_ci":           203,
+       "utf8_lithuanian_ci":       204,
+       "utf8_slovak_ci":           205,
+       "utf8_spanish2_ci":         206,
+       "utf8_roman_ci":            207,
+       "utf8_persian_ci":          208,
+       "utf8_esperanto_ci":        209,
+       "utf8_hungarian_ci":        210,
+       "utf8_sinhala_ci":          211,
+       "utf8_german2_ci":          212,
+       "utf8_croatian_ci":         213,
+       "utf8_unicode_520_ci":      214,
+       "utf8_vietnamese_ci":       215,
+       "utf8_general_mysql500_ci": 223,
+       "utf8mb4_unicode_ci":       224,
+       "utf8mb4_icelandic_ci":     225,
+       "utf8mb4_latvian_ci":       226,
+       "utf8mb4_romanian_ci":      227,
+       "utf8mb4_slovenian_ci":     228,
+       "utf8mb4_polish_ci":        229,
+       "utf8mb4_estonian_ci":      230,
+       "utf8mb4_spanish_ci":       231,
+       "utf8mb4_swedish_ci":       232,
+       "utf8mb4_turkish_ci":       233,
+       "utf8mb4_czech_ci":         234,
+       "utf8mb4_danish_ci":        235,
+       "utf8mb4_lithuanian_ci":    236,
+       "utf8mb4_slovak_ci":        237,
+       "utf8mb4_spanish2_ci":      238,
+       "utf8mb4_roman_ci":         239,
+       "utf8mb4_persian_ci":       240,
+       "utf8mb4_esperanto_ci":     241,
+       "utf8mb4_hungarian_ci":     242,
+       "utf8mb4_sinhala_ci":       243,
+       "utf8mb4_german2_ci":       244,
+       "utf8mb4_croatian_ci":      245,
+       "utf8mb4_unicode_520_ci":   246,
+       "utf8mb4_vietnamese_ci":    247,
+}
+
+// A blacklist of collations which are unsafe to interpolate parameters.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes,
+// so backslash-based escaping could corrupt a multibyte character.
+var unsafeCollations = map[string]bool{
+       "big5_chinese_ci":   true,
+       "sjis_japanese_ci":  true,
+       "gbk_chinese_ci":    true,
+       "big5_bin":          true,
+       "gb2312_bin":        true,
+       "gbk_bin":           true,
+       "sjis_bin":          true,
+       "cp932_japanese_ci": true,
+       "cp932_bin":         true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644 (file)
index 0000000..911be20
--- /dev/null
@@ -0,0 +1,654 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "context"
+       "database/sql"
+       "database/sql/driver"
+       "io"
+       "net"
+       "strconv"
+       "strings"
+       "time"
+)
+
+// mysqlContext is a minimal copy of context.Context so the driver can
+// compile on Go 1.7 and earlier, which lack the context package.
+type mysqlContext interface {
+       Done() <-chan struct{}
+       Err() error
+
+       // defined in context.Context, but not used in this driver:
+       // Deadline() (deadline time.Time, ok bool)
+       // Value(key interface{}) interface{}
+}
+
+// mysqlConn is a single client connection to a MySQL server,
+// implementing driver.Conn and the optional context-aware interfaces.
+type mysqlConn struct {
+       buf              buffer // shared read/write buffer (see buffer.go)
+       netConn          net.Conn
+       affectedRows     uint64 // rows affected by the last OK packet
+       insertId         uint64 // last insert id from the last OK packet
+       cfg              *Config
+       maxAllowedPacket int
+       maxWriteSize     int
+       writeTimeout     time.Duration
+       flags            clientFlag
+       status           statusFlag // server status bits from the last packet
+       sequence         uint8      // packet sequence counter
+       parseTime        bool
+
+       // for context support (Go 1.8+)
+       watching bool
+       watcher  chan<- mysqlContext
+       closech  chan struct{}
+       finished chan<- struct{}
+       canceled atomicError // set non-nil if conn is canceled
+       closed   atomicBool  // set when conn is closed, before closech is closed
+}
+
+// Handles parameters set in DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+       for param, val := range mc.cfg.Params {
+               switch param {
+               // Charset
+               case "charset":
+                       charsets := strings.Split(val, ",")
+                       for i := range charsets {
+                               // ignore errors here - a charset may not exist
+                               err = mc.exec("SET NAMES " + charsets[i])
+                               if err == nil {
+                                       break
+                               }
+                       }
+                       if err != nil {
+                               return
+                       }
+
+               // System Vars
+               default:
+                       err = mc.exec("SET " + param + "=" + val + "")
+                       if err != nil {
+                               return
+                       }
+               }
+       }
+
+       return
+}
+
+func (mc *mysqlConn) markBadConn(err error) error {
+       if mc == nil {
+               return err
+       }
+       if err != errBadConnNoWrite {
+               return err
+       }
+       return driver.ErrBadConn
+}
+
+// Begin starts a read-write transaction. It implements driver.Conn.
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+       return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+       if mc.closed.IsSet() {
+               errLog.Print(ErrInvalidConn)
+               return nil, driver.ErrBadConn
+       }
+       var q string
+       if readOnly {
+               q = "START TRANSACTION READ ONLY"
+       } else {
+               q = "START TRANSACTION"
+       }
+       err := mc.exec(q)
+       if err == nil {
+               return &mysqlTx{mc}, err
+       }
+       return nil, mc.markBadConn(err)
+}
+
+// Close gracefully terminates the connection: if it is still open, a
+// COM_QUIT packet is sent first, then the network connection is torn
+// down via cleanup.
+func (mc *mysqlConn) Close() (err error) {
+       // Makes Close idempotent
+       if !mc.closed.IsSet() {
+               err = mc.writeCommandPacket(comQuit)
+       }
+
+       mc.cleanup()
+
+       return
+}
+
+// cleanup closes the network connection and unsets internal variables.
+// Do not call this function after successful authentication - call Close
+// instead. cleanup is called before auth or on auth failure because MySQL
+// will have already closed the network connection.
+func (mc *mysqlConn) cleanup() {
+       // TrySet makes cleanup idempotent: only the first caller proceeds.
+       if !mc.closed.TrySet(true) {
+               return
+       }
+
+       // Signal context watchers that the connection is gone.
+       close(mc.closech)
+       if mc.netConn == nil {
+               return
+       }
+       if err := mc.netConn.Close(); err != nil {
+               errLog.Print(err)
+       }
+}
+
+func (mc *mysqlConn) error() error {
+       if mc.closed.IsSet() {
+               if err := mc.canceled.Value(); err != nil {
+                       return err
+               }
+               return ErrInvalidConn
+       }
+       return nil
+}
+
+// Prepare sends a COM_STMT_PREPARE for query and returns the resulting
+// server-side prepared statement. The parameter- and column-definition
+// packets are drained so the connection is ready for the next command.
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+       if mc.closed.IsSet() {
+               errLog.Print(ErrInvalidConn)
+               return nil, driver.ErrBadConn
+       }
+       // Send command
+       err := mc.writeCommandPacketStr(comStmtPrepare, query)
+       if err != nil {
+               return nil, mc.markBadConn(err)
+       }
+
+       stmt := &mysqlStmt{
+               mc: mc,
+       }
+
+       // Read Result
+       columnCount, err := stmt.readPrepareResultPacket()
+       if err == nil {
+               // drain the parameter definition packets
+               if stmt.paramCount > 0 {
+                       if err = mc.readUntilEOF(); err != nil {
+                               return nil, err
+                       }
+               }
+
+               // drain the column definition packets
+               if columnCount > 0 {
+                       err = mc.readUntilEOF()
+               }
+       }
+
+       return stmt, err
+}
+
+// interpolateParams substitutes args for the '?' placeholders in query on
+// the client side, producing a complete SQL string. Values are escaped
+// according to the connection's NO_BACKSLASH_ESCAPES status. It returns
+// driver.ErrSkip when interpolation is not possible (placeholder/arg count
+// mismatch, unsupported arg type, or the result would exceed
+// maxAllowedPacket), in which case the caller falls back to a server-side
+// prepared statement.
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+       // Number of ? should be same to len(args)
+       if strings.Count(query, "?") != len(args) {
+               return "", driver.ErrSkip
+       }
+
+       buf := mc.buf.takeCompleteBuffer()
+       if buf == nil {
+               // can not take the buffer. Something must be wrong with the connection
+               errLog.Print(ErrBusyBuffer)
+               return "", ErrInvalidConn
+       }
+       buf = buf[:0]
+       argPos := 0
+
+       for i := 0; i < len(query); i++ {
+               // copy the literal SQL up to the next placeholder
+               q := strings.IndexByte(query[i:], '?')
+               if q == -1 {
+                       buf = append(buf, query[i:]...)
+                       break
+               }
+               buf = append(buf, query[i:i+q]...)
+               i += q
+
+               arg := args[argPos]
+               argPos++
+
+               if arg == nil {
+                       buf = append(buf, "NULL"...)
+                       continue
+               }
+
+               switch v := arg.(type) {
+               case int64:
+                       buf = strconv.AppendInt(buf, v, 10)
+               case float64:
+                       buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+               case bool:
+                       if v {
+                               buf = append(buf, '1')
+                       } else {
+                               buf = append(buf, '0')
+                       }
+               case time.Time:
+                       if v.IsZero() {
+                               buf = append(buf, "'0000-00-00'"...)
+                       } else {
+                               v := v.In(mc.cfg.Loc)
+                               v = v.Add(time.Nanosecond * 500) // To round under microsecond
+                               year := v.Year()
+                               year100 := year / 100
+                               year1 := year % 100
+                               month := v.Month()
+                               day := v.Day()
+                               hour := v.Hour()
+                               minute := v.Minute()
+                               second := v.Second()
+                               micro := v.Nanosecond() / 1000
+
+                               // format as 'YYYY-MM-DD hh:mm:ss' via digit lookup tables
+                               buf = append(buf, []byte{
+                                       '\'',
+                                       digits10[year100], digits01[year100],
+                                       digits10[year1], digits01[year1],
+                                       '-',
+                                       digits10[month], digits01[month],
+                                       '-',
+                                       digits10[day], digits01[day],
+                                       ' ',
+                                       digits10[hour], digits01[hour],
+                                       ':',
+                                       digits10[minute], digits01[minute],
+                                       ':',
+                                       digits10[second], digits01[second],
+                               }...)
+
+                               // append fractional seconds only when non-zero
+                               if micro != 0 {
+                                       micro10000 := micro / 10000
+                                       micro100 := micro / 100 % 100
+                                       micro1 := micro % 100
+                                       buf = append(buf, []byte{
+                                               '.',
+                                               digits10[micro10000], digits01[micro10000],
+                                               digits10[micro100], digits01[micro100],
+                                               digits10[micro1], digits01[micro1],
+                                       }...)
+                               }
+                               buf = append(buf, '\'')
+                       }
+               case []byte:
+                       if v == nil {
+                               buf = append(buf, "NULL"...)
+                       } else {
+                               // _binary introducer avoids charset interpretation
+                               buf = append(buf, "_binary'"...)
+                               if mc.status&statusNoBackslashEscapes == 0 {
+                                       buf = escapeBytesBackslash(buf, v)
+                               } else {
+                                       buf = escapeBytesQuotes(buf, v)
+                               }
+                               buf = append(buf, '\'')
+                       }
+               case string:
+                       buf = append(buf, '\'')
+                       if mc.status&statusNoBackslashEscapes == 0 {
+                               buf = escapeStringBackslash(buf, v)
+                       } else {
+                               buf = escapeStringQuotes(buf, v)
+                       }
+                       buf = append(buf, '\'')
+               default:
+                       return "", driver.ErrSkip
+               }
+
+               // +4 reserves room for the packet header
+               if len(buf)+4 > mc.maxAllowedPacket {
+                       return "", driver.ErrSkip
+               }
+       }
+       if argPos != len(args) {
+               return "", driver.ErrSkip
+       }
+       return string(buf), nil
+}
+
+// Exec implements driver.Execer using the text protocol. When args are
+// present and InterpolateParams is enabled, placeholders are interpolated
+// client-side to save the prepare/close round trips; otherwise driver.ErrSkip
+// tells database/sql to fall back to Prepare + Exec.
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+       if mc.closed.IsSet() {
+               errLog.Print(ErrInvalidConn)
+               return nil, driver.ErrBadConn
+       }
+       if len(args) != 0 {
+               if !mc.cfg.InterpolateParams {
+                       return nil, driver.ErrSkip
+               }
+               // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+               prepared, err := mc.interpolateParams(query, args)
+               if err != nil {
+                       return nil, err
+               }
+               query = prepared
+       }
+       // reset counters so the OK packet handler records fresh values
+       mc.affectedRows = 0
+       mc.insertId = 0
+
+       err := mc.exec(query)
+       if err == nil {
+               return &mysqlResult{
+                       affectedRows: int64(mc.affectedRows),
+                       insertId:     int64(mc.insertId),
+               }, err
+       }
+       return nil, mc.markBadConn(err)
+}
+
+// exec is the internal function to execute commands: it sends a COM_QUERY
+// packet, then drains any result set (columns and rows) and any trailing
+// result sets so the connection is left in a clean state.
+func (mc *mysqlConn) exec(query string) error {
+       // Send command
+       if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+               return mc.markBadConn(err)
+       }
+
+       // Read Result
+       resLen, err := mc.readResultSetHeaderPacket()
+       if err != nil {
+               return err
+       }
+
+       if resLen > 0 {
+               // columns
+               if err := mc.readUntilEOF(); err != nil {
+                       return err
+               }
+
+               // rows
+               if err := mc.readUntilEOF(); err != nil {
+                       return err
+               }
+       }
+
+       // drain any additional result sets (multi-statement responses)
+       return mc.discardResults()
+}
+
+// Query implements driver.Queryer using the text protocol.
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+       return mc.query(query, args)
+}
+
+// query runs a text-protocol query and returns a textRows cursor over the
+// result. As in Exec, args are interpolated client-side when
+// InterpolateParams is enabled, otherwise driver.ErrSkip triggers the
+// prepared-statement fallback in database/sql.
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+       if mc.closed.IsSet() {
+               errLog.Print(ErrInvalidConn)
+               return nil, driver.ErrBadConn
+       }
+       if len(args) != 0 {
+               if !mc.cfg.InterpolateParams {
+                       return nil, driver.ErrSkip
+               }
+               // try client-side prepare to reduce roundtrip
+               prepared, err := mc.interpolateParams(query, args)
+               if err != nil {
+                       return nil, err
+               }
+               query = prepared
+       }
+       // Send command
+       err := mc.writeCommandPacketStr(comQuery, query)
+       if err == nil {
+               // Read Result
+               var resLen int
+               resLen, err = mc.readResultSetHeaderPacket()
+               if err == nil {
+                       rows := new(textRows)
+                       rows.mc = mc
+
+                       if resLen == 0 {
+                               // no columns: an OK packet; advance to any next result set
+                               rows.rs.done = true
+
+                               switch err := rows.NextResultSet(); err {
+                               case nil, io.EOF:
+                                       return rows, nil
+                               default:
+                                       return nil, err
+                               }
+                       }
+
+                       // Columns
+                       rows.rs.columns, err = mc.readColumns(resLen)
+                       return rows, err
+               }
+       }
+       return nil, mc.markBadConn(err)
+}
+
+// getSystemVar gets the value of the given MySQL system variable via
+// "SELECT @@name". The returned byte slice is only valid until the next
+// read on this connection.
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+       // Send command
+       if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+               return nil, err
+       }
+
+       // Read Result
+       resLen, err := mc.readResultSetHeaderPacket()
+       if err == nil {
+               rows := new(textRows)
+               rows.mc = mc
+               rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+               if resLen > 0 {
+                       // Columns
+                       if err := mc.readUntilEOF(); err != nil {
+                               return nil, err
+                       }
+               }
+
+               // read the single row, then drain the trailing EOF packet
+               dest := make([]driver.Value, resLen)
+               if err = rows.readRow(dest); err == nil {
+                       return dest[0].([]byte), mc.readUntilEOF()
+               }
+       }
+       return nil, err
+}
+
+// cancel is called when the query has been canceled: it records the
+// cancellation cause and force-closes the connection.
+// (The original comment incorrectly said "finish is called ...".)
+func (mc *mysqlConn) cancel(err error) {
+       mc.canceled.Set(err)
+       mc.cleanup()
+}
+
+// finish is called when the query has succeeded: it tells the context
+// watcher goroutine to stop watching, unless the connection is already
+// closed (closech), in which case signaling would block forever.
+func (mc *mysqlConn) finish() {
+       if !mc.watching || mc.finished == nil {
+               return
+       }
+       select {
+       case mc.finished <- struct{}{}:
+               mc.watching = false
+       case <-mc.closech:
+       }
+}
+
+// Ping implements driver.Pinger interface. It sends a COM_PING packet and
+// waits for the OK response, honoring ctx cancellation via watchCancel.
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+       if mc.closed.IsSet() {
+               errLog.Print(ErrInvalidConn)
+               return driver.ErrBadConn
+       }
+
+       if err = mc.watchCancel(ctx); err != nil {
+               return
+       }
+       defer mc.finish()
+
+       if err = mc.writeCommandPacket(comPing); err != nil {
+               return
+       }
+
+       return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface. A non-default isolation
+// level is applied with SET TRANSACTION before the transaction is started,
+// so it affects only the upcoming transaction.
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+       if err := mc.watchCancel(ctx); err != nil {
+               return nil, err
+       }
+       defer mc.finish()
+
+       if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+               level, err := mapIsolationLevel(opts.Isolation)
+               if err != nil {
+                       return nil, err
+               }
+               err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return mc.begin(opts.ReadOnly)
+}
+
+// QueryContext implements driver.QueryerContext. On success the rows take
+// over cancellation handling (rows.finish), so finish is only called here
+// on the error path.
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+       dargs, err := namedValueToValue(args)
+       if err != nil {
+               return nil, err
+       }
+
+       if err := mc.watchCancel(ctx); err != nil {
+               return nil, err
+       }
+
+       rows, err := mc.query(query, dargs)
+       if err != nil {
+               mc.finish()
+               return nil, err
+       }
+       rows.finish = mc.finish
+       return rows, err
+}
+
+// ExecContext implements driver.ExecerContext.
+// Unlike QueryContext there is no result cursor to keep open, so the
+// watcher is released as soon as Exec returns.
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+       dargs, err := namedValueToValue(args)
+       if err != nil {
+               return nil, err
+       }
+
+       if err := mc.watchCancel(ctx); err != nil {
+               return nil, err
+       }
+       defer mc.finish()
+
+       return mc.Exec(query, dargs)
+}
+
+// PrepareContext implements driver.ConnPrepareContext.
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+       if err := mc.watchCancel(ctx); err != nil {
+               return nil, err
+       }
+
+       stmt, err := mc.Prepare(query)
+       mc.finish()
+       if err != nil {
+               return nil, err
+       }
+
+       // ctx may have fired after the watcher was released but before we
+       // return; don't hand back a statement the caller no longer wants.
+       select {
+       default:
+       case <-ctx.Done():
+               stmt.Close()
+               return nil, ctx.Err()
+       }
+       return stmt, nil
+}
+
+// QueryContext implements driver.StmtQueryContext.
+// Mirrors mysqlConn.QueryContext: the watcher is kept armed and released
+// via rows.finish when the caller is done with the result set.
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+       dargs, err := namedValueToValue(args)
+       if err != nil {
+               return nil, err
+       }
+
+       if err := stmt.mc.watchCancel(ctx); err != nil {
+               return nil, err
+       }
+
+       rows, err := stmt.query(dargs)
+       if err != nil {
+               stmt.mc.finish()
+               return nil, err
+       }
+       rows.finish = stmt.mc.finish
+       return rows, err
+}
+
+// ExecContext implements driver.StmtExecContext.
+// The watcher is released as soon as Exec returns (no open cursor).
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+       dargs, err := namedValueToValue(args)
+       if err != nil {
+               return nil, err
+       }
+
+       if err := stmt.mc.watchCancel(ctx); err != nil {
+               return nil, err
+       }
+       defer stmt.mc.finish()
+
+       return stmt.Exec(dargs)
+}
+
+// watchCancel registers ctx with the connection's watcher goroutine so a
+// cancellation during the next operation invalidates the connection.
+// Callers must pair a nil return with a later finish() (directly or via
+// rows.finish) to disarm the watcher.
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+       if mc.watching {
+               // Reach here if canceled,
+               // so the connection is already invalid
+               mc.cleanup()
+               return nil
+       }
+       // A ctx that can never be canceled (no deadline, no cancel) needs
+       // no watching at all.
+       if ctx.Done() == nil {
+               return nil
+       }
+
+       mc.watching = true
+       // Fast path: bail out immediately if ctx is already done.
+       select {
+       default:
+       case <-ctx.Done():
+               return ctx.Err()
+       }
+       if mc.watcher == nil {
+               // No watcher goroutine was started for this connection
+               // (e.g. a bare mysqlConn constructed in tests).
+               return nil
+       }
+
+       mc.watcher <- ctx
+
+       return nil
+}
+
+// startWatcher launches the per-connection goroutine that waits for a ctx
+// handed over by watchCancel and invalidates the connection if the ctx is
+// canceled before finish() signals completion. The goroutine exits when
+// closech is closed, so its lifetime is bounded by the connection's.
+func (mc *mysqlConn) startWatcher() {
+       watcher := make(chan mysqlContext, 1)
+       mc.watcher = watcher
+       finished := make(chan struct{})
+       mc.finished = finished
+       go func() {
+               for {
+                       // Wait for the next operation's ctx (or connection close).
+                       var ctx mysqlContext
+                       select {
+                       case ctx = <-watcher:
+                       case <-mc.closech:
+                               return
+                       }
+
+                       // Race ctx cancellation against operation completion.
+                       select {
+                       case <-ctx.Done():
+                               mc.cancel(ctx.Err())
+                       case <-finished:
+                       case <-mc.closech:
+                               return
+                       }
+               }
+       }()
+}
+
+// CheckNamedValue implements driver.NamedValueChecker.
+// It normalizes each argument with the driver's converter (e.g. uint64
+// values with the high bit set become their decimal string form).
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+       nv.Value, err = converter{}.ConvertValue(nv.Value)
+       return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+// It only reports whether the connection is still usable; no server-side
+// session state is reset here.
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+       if mc.closed.IsSet() {
+               return driver.ErrBadConn
+       }
+       return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_test.go b/vendor/github.com/go-sql-driver/mysql/connection_test.go
new file mode 100644 (file)
index 0000000..dec3761
--- /dev/null
@@ -0,0 +1,81 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "database/sql/driver"
+       "testing"
+)
+
+// TestInterpolateParams checks client-side parameter interpolation for the
+// happy path: an int64 and a string substituted into their placeholders.
+func TestInterpolateParams(t *testing.T) {
+       mc := &mysqlConn{
+               buf:              newBuffer(nil),
+               maxAllowedPacket: maxPacketSize,
+               cfg: &Config{
+                       InterpolateParams: true,
+               },
+       }
+
+       q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"})
+       if err != nil {
+               t.Errorf("Expected err=nil, got %#v", err)
+               return
+       }
+       expected := `SELECT 42+'gopher'`
+       if q != expected {
+               t.Errorf("Expected: %q\nGot: %q", expected, q)
+       }
+}
+
+// TestInterpolateParamsTooManyPlaceholders verifies that a placeholder
+// count exceeding the argument count yields driver.ErrSkip (fall back to
+// server-side preparation) rather than a malformed query.
+func TestInterpolateParamsTooManyPlaceholders(t *testing.T) {
+       mc := &mysqlConn{
+               buf:              newBuffer(nil),
+               maxAllowedPacket: maxPacketSize,
+               cfg: &Config{
+                       InterpolateParams: true,
+               },
+       }
+
+       q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)})
+       if err != driver.ErrSkip {
+               t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
+       }
+}
+
+// We don't support placeholder in string literal for now.
+// https://github.com/go-sql-driver/mysql/pull/490
+func TestInterpolateParamsPlaceholderInString(t *testing.T) {
+       mc := &mysqlConn{
+               buf:              newBuffer(nil),
+               maxAllowedPacket: maxPacketSize,
+               cfg: &Config{
+                       InterpolateParams: true,
+               },
+       }
+
+       q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)})
+       // When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42`
+       if err != driver.ErrSkip {
+               t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
+       }
+}
+
+// TestCheckNamedValue verifies that CheckNamedValue converts a uint64 with
+// the high bit set into its decimal string representation (MySQL has no
+// native signed type wide enough to hold it).
+func TestCheckNamedValue(t *testing.T) {
+       value := driver.NamedValue{Value: ^uint64(0)}
+       x := &mysqlConn{}
+       err := x.CheckNamedValue(&value)
+
+       if err != nil {
+               t.Fatal("uint64 high-bit not convertible", err)
+       }
+
+       if value.Value != "18446744073709551615" {
+               t.Fatalf("uint64 high-bit not converted, got %#v %T", value.Value, value.Value)
+       }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644 (file)
index 0000000..b1e6b85
--- /dev/null
@@ -0,0 +1,174 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+// Driver- and protocol-level defaults.
+const (
+       defaultAuthPlugin       = "mysql_native_password"
+       defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+       minProtocolVersion      = 10
+       maxPacketSize           = 1<<24 - 1
+       timeFormat              = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// First byte of a server packet, identifying the response type.
+const (
+       iOK           byte = 0x00
+       iAuthMoreData byte = 0x01
+       iLocalInFile  byte = 0xfb
+       iEOF          byte = 0xfe
+       iERR          byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+// clientFlag bits advertise client capabilities during the handshake.
+type clientFlag uint32
+
+const (
+       clientLongPassword clientFlag = 1 << iota
+       clientFoundRows
+       clientLongFlag
+       clientConnectWithDB
+       clientNoSchema
+       clientCompress
+       clientODBC
+       clientLocalFiles
+       clientIgnoreSpace
+       clientProtocol41
+       clientInteractive
+       clientSSL
+       clientIgnoreSIGPIPE
+       clientTransactions
+       clientReserved
+       clientSecureConn
+       clientMultiStatements
+       clientMultiResults
+       clientPSMultiResults
+       clientPluginAuth
+       clientConnectAttrs
+       clientPluginAuthLenEncClientData
+       clientCanHandleExpiredPasswords
+       clientSessionTrack
+       clientDeprecateEOF
+)
+
+// Command bytes (COM_*) sent as the first byte of a client request packet.
+// comQuit is 0x01; the rest follow in protocol order.
+const (
+       comQuit byte = iota + 1
+       comInitDB
+       comQuery
+       comFieldList
+       comCreateDB
+       comDropDB
+       comRefresh
+       comShutdown
+       comStatistics
+       comProcessInfo
+       comConnect
+       comProcessKill
+       comDebug
+       comPing
+       comTime
+       comDelayedInsert
+       comChangeUser
+       comBinlogDump
+       comTableDump
+       comConnectOut
+       comRegisterSlave
+       comStmtPrepare
+       comStmtExecute
+       comStmtSendLongData
+       comStmtClose
+       comStmtReset
+       comSetOption
+       comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+// fieldType identifies a column's wire type in result set metadata.
+type fieldType byte
+
+const (
+       fieldTypeDecimal fieldType = iota
+       fieldTypeTiny
+       fieldTypeShort
+       fieldTypeLong
+       fieldTypeFloat
+       fieldTypeDouble
+       fieldTypeNULL
+       fieldTypeTimestamp
+       fieldTypeLongLong
+       fieldTypeInt24
+       fieldTypeDate
+       fieldTypeTime
+       fieldTypeDateTime
+       fieldTypeYear
+       fieldTypeNewDate
+       fieldTypeVarChar
+       fieldTypeBit
+)
+
+// The protocol leaves a gap; this second group starts at 0xf5.
+const (
+       fieldTypeJSON fieldType = iota + 0xf5
+       fieldTypeNewDecimal
+       fieldTypeEnum
+       fieldTypeSet
+       fieldTypeTinyBLOB
+       fieldTypeMediumBLOB
+       fieldTypeLongBLOB
+       fieldTypeBLOB
+       fieldTypeVarString
+       fieldTypeString
+       fieldTypeGeometry
+)
+
+// fieldFlag bits describe column attributes (nullability, key membership,
+// signedness, …) in result set metadata.
+type fieldFlag uint16
+
+const (
+       flagNotNULL fieldFlag = 1 << iota
+       flagPriKey
+       flagUniqueKey
+       flagMultipleKey
+       flagBLOB
+       flagUnsigned
+       flagZeroFill
+       flagBinary
+       flagEnum
+       flagAutoIncrement
+       flagTimestamp
+       flagSet
+       flagUnknown1
+       flagUnknown2
+       flagUnknown3
+       flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+// statusFlag bits report session state in OK/EOF packets.
+type statusFlag uint16
+
+const (
+       statusInTrans statusFlag = 1 << iota
+       statusInAutocommit
+       statusReserved // Not in documentation
+       statusMoreResultsExists
+       statusNoGoodIndexUsed
+       statusNoIndexUsed
+       statusCursorExists
+       statusLastRowSent
+       statusDbDropped
+       statusNoBackslashEscapes
+       statusMetadataChanged
+       statusQueryWasSlow
+       statusPsOutParams
+       statusInTransReadonly
+       statusSessionStateChanged
+)
+
+// Status bytes exchanged during the caching_sha2_password auth handshake.
+const (
+       cachingSha2PasswordRequestPublicKey          = 2
+       cachingSha2PasswordFastAuthSuccess           = 3
+       cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644 (file)
index 0000000..ba12978
--- /dev/null
@@ -0,0 +1,165 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package.
+//
+// The driver should be used via the database/sql package:
+//
+//  import "database/sql"
+//  import _ "github.com/go-sql-driver/mysql"
+//
+//  db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+       "database/sql"
+       "database/sql/driver"
+       "net"
+       "sync"
+)
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+type DialFunc func(addr string) (net.Conn, error)
+
+var (
+       // dialsLock guards dials; dials maps a custom network name to its
+       // registered dial function.
+       dialsLock sync.RWMutex
+       dials     map[string]DialFunc
+)
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// addr is passed as a parameter to the dial function.
+func RegisterDial(net string, dial DialFunc) {
+       dialsLock.Lock()
+       defer dialsLock.Unlock()
+       // Lazily allocate the map on first registration.
+       if dials == nil {
+               dials = make(map[string]DialFunc)
+       }
+       dials[net] = dial
+}
+
+// Open new Connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+       var err error
+
+       // New mysqlConn
+       mc := &mysqlConn{
+               maxAllowedPacket: maxPacketSize,
+               maxWriteSize:     maxPacketSize - 1,
+               closech:          make(chan struct{}),
+       }
+       mc.cfg, err = ParseDSN(dsn)
+       if err != nil {
+               return nil, err
+       }
+       mc.parseTime = mc.cfg.ParseTime
+
+       // Connect to Server: prefer a registered custom dialer for this
+       // network name, otherwise use net.Dialer with the configured timeout.
+       dialsLock.RLock()
+       dial, ok := dials[mc.cfg.Net]
+       dialsLock.RUnlock()
+       if ok {
+               mc.netConn, err = dial(mc.cfg.Addr)
+       } else {
+               nd := net.Dialer{Timeout: mc.cfg.Timeout}
+               mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
+       }
+       if err != nil {
+               return nil, err
+       }
+
+       // Enable TCP Keepalives on TCP connections
+       if tc, ok := mc.netConn.(*net.TCPConn); ok {
+               if err := tc.SetKeepAlive(true); err != nil {
+                       // Don't send COM_QUIT before handshake.
+                       mc.netConn.Close()
+                       mc.netConn = nil
+                       return nil, err
+               }
+       }
+
+       // Call startWatcher for context support (From Go 1.8)
+       mc.startWatcher()
+
+       mc.buf = newBuffer(mc.netConn)
+
+       // Set I/O timeouts
+       mc.buf.timeout = mc.cfg.ReadTimeout
+       mc.writeTimeout = mc.cfg.WriteTimeout
+
+       // Reading Handshake Initialization Packet
+       authData, plugin, err := mc.readHandshakePacket()
+       if err != nil {
+               mc.cleanup()
+               return nil, err
+       }
+       if plugin == "" {
+               plugin = defaultAuthPlugin
+       }
+
+       // Send Client Authentication Packet
+       authResp, addNUL, err := mc.auth(authData, plugin)
+       if err != nil {
+               // try the default auth plugin, if using the requested plugin failed
+               errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+               plugin = defaultAuthPlugin
+               authResp, addNUL, err = mc.auth(authData, plugin)
+               if err != nil {
+                       mc.cleanup()
+                       return nil, err
+               }
+       }
+       if err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin); err != nil {
+               mc.cleanup()
+               return nil, err
+       }
+
+       // Handle response to auth packet, switch methods if possible
+       if err = mc.handleAuthResult(authData, plugin); err != nil {
+               // Authentication failed and MySQL has already closed the connection
+               // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+               // Do not send COM_QUIT, just cleanup and return the error.
+               mc.cleanup()
+               return nil, err
+       }
+
+       if mc.cfg.MaxAllowedPacket > 0 {
+               mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+       } else {
+               // Get max allowed packet size
+               maxap, err := mc.getSystemVar("max_allowed_packet")
+               if err != nil {
+                       mc.Close()
+                       return nil, err
+               }
+               mc.maxAllowedPacket = stringToInt(maxap) - 1
+       }
+       if mc.maxAllowedPacket < maxPacketSize {
+               mc.maxWriteSize = mc.maxAllowedPacket
+       }
+
+       // Handle DSN Params
+       err = mc.handleParams()
+       if err != nil {
+               mc.Close()
+               return nil, err
+       }
+
+       return mc, nil
+}
+
+// init registers this driver with database/sql under the name "mysql".
+func init() {
+       sql.Register("mysql", &MySQLDriver{})
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go
new file mode 100644 (file)
index 0000000..f2bf344
--- /dev/null
@@ -0,0 +1,2862 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "context"
+       "crypto/tls"
+       "database/sql"
+       "database/sql/driver"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "math"
+       "net"
+       "net/url"
+       "os"
+       "reflect"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "testing"
+       "time"
+)
+
+// Ensure that all the driver interfaces are implemented
+var (
+       _ driver.Rows = &binaryRows{}
+       _ driver.Rows = &textRows{}
+)
+
+// Connection parameters, populated from MYSQL_TEST_* env vars in init().
+// available reports whether a test server could be reached.
+var (
+       user      string
+       pass      string
+       prot      string
+       addr      string
+       dbname    string
+       dsn       string
+       netAddr   string
+       available bool
+)
+
+// Fixed date/time fixtures shared by the date/time round-trip tests.
+var (
+       tDate      = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC)
+       sDate      = "2012-06-14"
+       tDateTime  = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)
+       sDateTime  = "2011-11-20 21:27:37"
+       tDate0     = time.Time{}
+       sDate0     = "0000-00-00"
+       sDateTime0 = "0000-00-00 00:00:00"
+)
+
+// See https://github.com/go-sql-driver/mysql/wiki/Testing
+// init reads the MYSQL_TEST_* environment (with defaults), builds the test
+// DSN, and probes the server once so tests can skip when it is absent.
+func init() {
+       // get environment variables
+       env := func(key, defaultValue string) string {
+               if value := os.Getenv(key); value != "" {
+                       return value
+               }
+               return defaultValue
+       }
+       user = env("MYSQL_TEST_USER", "root")
+       pass = env("MYSQL_TEST_PASS", "")
+       prot = env("MYSQL_TEST_PROT", "tcp")
+       addr = env("MYSQL_TEST_ADDR", "localhost:3306")
+       dbname = env("MYSQL_TEST_DBNAME", "gotest")
+       netAddr = fmt.Sprintf("%s(%s)", prot, addr)
+       dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, pass, netAddr, dbname)
+       // A successful dial marks the server as available for the test run.
+       c, err := net.Dial(prot, addr)
+       if err == nil {
+               available = true
+               c.Close()
+       }
+}
+
+// DBTest bundles a *testing.T with an open *sql.DB for the test helpers below.
+type DBTest struct {
+       *testing.T
+       db *sql.DB
+}
+
+// runTestsWithMultiStatement runs the given tests against a connection
+// opened with multiStatements=true, dropping the scratch `test` table
+// between tests. Skips when no server is reachable.
+func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
+       if !available {
+               t.Skipf("MySQL server not running on %s", netAddr)
+       }
+
+       dsn += "&multiStatements=true"
+       var db *sql.DB
+       if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
+               db, err = sql.Open("mysql", dsn)
+               if err != nil {
+                       t.Fatalf("error connecting: %s", err.Error())
+               }
+               defer db.Close()
+       }
+
+       dbt := &DBTest{t, db}
+       for _, test := range tests {
+               test(dbt)
+               dbt.db.Exec("DROP TABLE IF EXISTS test")
+       }
+}
+
+// runTests runs each test against up to three connections: the plain DSN,
+// the DSN with interpolateParams=true, and the DSN with
+// multiStatements=true (the latter two only when the DSN parses safely).
+// The scratch `test` table is dropped between runs.
+func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
+       if !available {
+               t.Skipf("MySQL server not running on %s", netAddr)
+       }
+
+       db, err := sql.Open("mysql", dsn)
+       if err != nil {
+               t.Fatalf("error connecting: %s", err.Error())
+       }
+       defer db.Close()
+
+       db.Exec("DROP TABLE IF EXISTS test")
+
+       dsn2 := dsn + "&interpolateParams=true"
+       var db2 *sql.DB
+       if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
+               db2, err = sql.Open("mysql", dsn2)
+               if err != nil {
+                       t.Fatalf("error connecting: %s", err.Error())
+               }
+               defer db2.Close()
+       }
+
+       dsn3 := dsn + "&multiStatements=true"
+       var db3 *sql.DB
+       if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation {
+               db3, err = sql.Open("mysql", dsn3)
+               if err != nil {
+                       t.Fatalf("error connecting: %s", err.Error())
+               }
+               defer db3.Close()
+       }
+
+       dbt := &DBTest{t, db}
+       dbt2 := &DBTest{t, db2}
+       dbt3 := &DBTest{t, db3}
+       for _, test := range tests {
+               test(dbt)
+               dbt.db.Exec("DROP TABLE IF EXISTS test")
+               if db2 != nil {
+                       test(dbt2)
+                       dbt2.db.Exec("DROP TABLE IF EXISTS test")
+               }
+               if db3 != nil {
+                       test(dbt3)
+                       dbt3.db.Exec("DROP TABLE IF EXISTS test")
+               }
+       }
+}
+
+// fail aborts the test, truncating oversized queries in the failure message.
+func (dbt *DBTest) fail(method, query string, err error) {
+       if len(query) > 300 {
+               query = "[query too large to print]"
+       }
+       dbt.Fatalf("error on %s %s: %s", method, query, err.Error())
+}
+
+// mustExec runs query via Exec and fails the test on error.
+func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
+       res, err := dbt.db.Exec(query, args...)
+       if err != nil {
+               dbt.fail("exec", query, err)
+       }
+       return res
+}
+
+// mustQuery runs query via Query and fails the test on error.
+func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
+       rows, err := dbt.db.Query(query, args...)
+       if err != nil {
+               dbt.fail("query", query, err)
+       }
+       return rows
+}
+
+// maybeSkip skips the test when err is a MySQLError carrying skipErrno
+// (e.g. a feature unsupported by the server under test).
+func maybeSkip(t *testing.T, err error, skipErrno uint16) {
+       mySQLErr, ok := err.(*MySQLError)
+       if !ok {
+               return
+       }
+
+       if mySQLErr.Number == skipErrno {
+               t.Skipf("skipping test for error: %v", err)
+       }
+}
+
+// TestEmptyQuery verifies a comment-only query returns an empty result set
+// instead of hanging (regression test for issue #255).
+func TestEmptyQuery(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               // just a comment, no query
+               rows := dbt.mustQuery("--")
+               // will hang before #255
+               if rows.Next() {
+                       dbt.Errorf("next on rows must be false")
+               }
+       })
+}
+
+// TestCRUD exercises a full INSERT/SELECT/UPDATE/DELETE round trip on a
+// one-column BOOL table, checking RowsAffected and LastInsertId along the way.
+func TestCRUD(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               // Create Table
+               dbt.mustExec("CREATE TABLE test (value BOOL)")
+
+               // Test for unexpected data
+               var out bool
+               rows := dbt.mustQuery("SELECT * FROM test")
+               if rows.Next() {
+                       dbt.Error("unexpected data in empty table")
+               }
+
+               // Create Data
+               res := dbt.mustExec("INSERT INTO test VALUES (1)")
+               count, err := res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 1 {
+                       dbt.Fatalf("expected 1 affected row, got %d", count)
+               }
+
+               id, err := res.LastInsertId()
+               if err != nil {
+                       dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error())
+               }
+               if id != 0 {
+                       dbt.Fatalf("expected InsertId 0, got %d", id)
+               }
+
+               // Read
+               rows = dbt.mustQuery("SELECT value FROM test")
+               if rows.Next() {
+                       rows.Scan(&out)
+                       if true != out {
+                               dbt.Errorf("true != %t", out)
+                       }
+
+                       if rows.Next() {
+                               dbt.Error("unexpected data")
+                       }
+               } else {
+                       dbt.Error("no data")
+               }
+
+               // Update
+               res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true)
+               count, err = res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 1 {
+                       dbt.Fatalf("expected 1 affected row, got %d", count)
+               }
+
+               // Check Update
+               rows = dbt.mustQuery("SELECT value FROM test")
+               if rows.Next() {
+                       rows.Scan(&out)
+                       if false != out {
+                               dbt.Errorf("false != %t", out)
+                       }
+
+                       if rows.Next() {
+                               dbt.Error("unexpected data")
+                       }
+               } else {
+                       dbt.Error("no data")
+               }
+
+               // Delete
+               res = dbt.mustExec("DELETE FROM test WHERE value = ?", false)
+               count, err = res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 1 {
+                       dbt.Fatalf("expected 1 affected row, got %d", count)
+               }
+
+               // Check for unexpected rows
+               res = dbt.mustExec("DELETE FROM test")
+               count, err = res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 0 {
+                       dbt.Fatalf("expected 0 affected row, got %d", count)
+               }
+       })
+}
+
+// TestMultiQuery verifies that with multiStatements enabled, several
+// semicolon-separated UPDATEs run in one Exec and the last one's effect
+// is visible (RowsAffected reflects the first statement).
+func TestMultiQuery(t *testing.T) {
+       runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+               // Create Table
+               dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
+
+               // Create Data
+               res := dbt.mustExec("INSERT INTO test VALUES (1, 1)")
+               count, err := res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 1 {
+                       dbt.Fatalf("expected 1 affected row, got %d", count)
+               }
+
+               // Update
+               res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
+               count, err = res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 1 {
+                       dbt.Fatalf("expected 1 affected row, got %d", count)
+               }
+
+               // Read
+               var out int
+               rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
+               if rows.Next() {
+                       rows.Scan(&out)
+                       if 5 != out {
+                               dbt.Errorf("5 != %d", out)
+                       }
+
+                       if rows.Next() {
+                               dbt.Error("unexpected data")
+                       }
+               } else {
+                       dbt.Error("no data")
+               }
+
+       })
+}
+
+// TestInt round-trips an int64 through every MySQL integer column type,
+// both plain (signed) and with UNSIGNED ZEROFILL.
+func TestInt(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"}
+               in := int64(42)
+               var out int64
+               var rows *sql.Rows
+
+               // SIGNED
+               for _, v := range types {
+                       dbt.mustExec("CREATE TABLE test (value " + v + ")")
+
+                       dbt.mustExec("INSERT INTO test VALUES (?)", in)
+
+                       rows = dbt.mustQuery("SELECT value FROM test")
+                       if rows.Next() {
+                               rows.Scan(&out)
+                               if in != out {
+                                       dbt.Errorf("%s: %d != %d", v, in, out)
+                               }
+                       } else {
+                               dbt.Errorf("%s: no data", v)
+                       }
+
+                       dbt.mustExec("DROP TABLE IF EXISTS test")
+               }
+
+               // UNSIGNED ZEROFILL
+               for _, v := range types {
+                       dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)")
+
+                       dbt.mustExec("INSERT INTO test VALUES (?)", in)
+
+                       rows = dbt.mustQuery("SELECT value FROM test")
+                       if rows.Next() {
+                               rows.Scan(&out)
+                               if in != out {
+                                       dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out)
+                               }
+                       } else {
+                               dbt.Errorf("%s ZEROFILL: no data", v)
+                       }
+
+                       dbt.mustExec("DROP TABLE IF EXISTS test")
+               }
+       })
+}
+
+// TestFloat32 round-trips a float32 through FLOAT and DOUBLE columns.
+func TestFloat32(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               types := [2]string{"FLOAT", "DOUBLE"}
+               in := float32(42.23)
+               var out float32
+               var rows *sql.Rows
+               for _, v := range types {
+                       dbt.mustExec("CREATE TABLE test (value " + v + ")")
+                       dbt.mustExec("INSERT INTO test VALUES (?)", in)
+                       rows = dbt.mustQuery("SELECT value FROM test")
+                       if rows.Next() {
+                               rows.Scan(&out)
+                               if in != out {
+                                       dbt.Errorf("%s: %g != %g", v, in, out)
+                               }
+                       } else {
+                               dbt.Errorf("%s: no data", v)
+                       }
+                       dbt.mustExec("DROP TABLE IF EXISTS test")
+               }
+       })
+}
+
+// TestFloat64 reads a literal 42.23 back as float64 from FLOAT and DOUBLE
+// columns (no placeholder, so no client-side conversion on insert).
+func TestFloat64(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               types := [2]string{"FLOAT", "DOUBLE"}
+               var expected float64 = 42.23
+               var out float64
+               var rows *sql.Rows
+               for _, v := range types {
+                       dbt.mustExec("CREATE TABLE test (value " + v + ")")
+                       dbt.mustExec("INSERT INTO test VALUES (42.23)")
+                       rows = dbt.mustQuery("SELECT value FROM test")
+                       if rows.Next() {
+                               rows.Scan(&out)
+                               if expected != out {
+                                       dbt.Errorf("%s: %g != %g", v, expected, out)
+                               }
+                       } else {
+                               dbt.Errorf("%s: no data", v)
+                       }
+                       dbt.mustExec("DROP TABLE IF EXISTS test")
+               }
+       })
+}
+
+// TestFloat64Placeholder is like TestFloat64 but fetches the row via a
+// WHERE clause with a bound placeholder (binary protocol path).
+func TestFloat64Placeholder(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               types := [2]string{"FLOAT", "DOUBLE"}
+               var expected float64 = 42.23
+               var out float64
+               var rows *sql.Rows
+               for _, v := range types {
+                       dbt.mustExec("CREATE TABLE test (id int, value " + v + ")")
+                       dbt.mustExec("INSERT INTO test VALUES (1, 42.23)")
+                       rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1)
+                       if rows.Next() {
+                               // report Scan failures instead of comparing a stale out value
+                               if err := rows.Scan(&out); err != nil {
+                                       dbt.Errorf("%s: %v", v, err)
+                               }
+                               if expected != out {
+                                       dbt.Errorf("%s: %g != %g", v, expected, out)
+                               }
+                       } else {
+                               dbt.Errorf("%s: no data", v)
+                       }
+                       // release the connection held by the result set before DROP TABLE
+                       rows.Close()
+                       dbt.mustExec("DROP TABLE IF EXISTS test")
+               }
+       })
+}
+
+// TestString round-trips a multi-byte UTF-8 string through every textual
+// column type, then through a BLOB column addressed by id.
+func TestString(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
+               in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах  น่าฟังเอย"
+               var out string
+               var rows *sql.Rows
+
+               for _, v := range types {
+                       dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8")
+
+                       dbt.mustExec("INSERT INTO test VALUES (?)", in)
+
+                       rows = dbt.mustQuery("SELECT value FROM test")
+                       if rows.Next() {
+                               // report Scan failures instead of comparing a stale out value
+                               if err := rows.Scan(&out); err != nil {
+                                       dbt.Errorf("%s: %v", v, err)
+                               }
+                               if in != out {
+                                       dbt.Errorf("%s: %s != %s", v, in, out)
+                               }
+                       } else {
+                               dbt.Errorf("%s: no data", v)
+                       }
+
+                       // release the connection held by the result set before DROP TABLE
+                       rows.Close()
+
+                       dbt.mustExec("DROP TABLE IF EXISTS test")
+               }
+
+               // BLOB
+               dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
+
+               id := 2
+               in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
+                       "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
+                       "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
+                       "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " +
+                       "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
+                       "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
+                       "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
+                       "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
+               dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in)
+
+               err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out)
+               if err != nil {
+                       dbt.Fatalf("Error on BLOB-Query: %s", err.Error())
+               } else if out != in {
+                       dbt.Errorf("BLOB: %s != %s", in, out)
+               }
+       })
+}
+
+// TestRawBytes verifies that scanning into sql.RawBytes yields the bound
+// values and that appending to one RawBytes does not corrupt a sibling
+// (regression test for go-sql-driver/mysql issue 765).
+func TestRawBytes(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               v1 := []byte("aaa")
+               v2 := []byte("bbb")
+               rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
+               // close the result set so the connection is returned to the pool
+               defer rows.Close()
+               if rows.Next() {
+                       var o1, o2 sql.RawBytes
+                       if err := rows.Scan(&o1, &o2); err != nil {
+                               dbt.Errorf("Got error: %v", err)
+                       }
+                       if !bytes.Equal(v1, o1) {
+                               dbt.Errorf("expected %v, got %v", v1, o1)
+                       }
+                       if !bytes.Equal(v2, o2) {
+                               dbt.Errorf("expected %v, got %v", v2, o2)
+                       }
+                       // https://github.com/go-sql-driver/mysql/issues/765
+                       // Appending to RawBytes shouldn't overwrite next RawBytes.
+                       o1 = append(o1, "xyzzy"...)
+                       if !bytes.Equal(v2, o2) {
+                               dbt.Errorf("expected %v, got %v", v2, o2)
+                       }
+               } else {
+                       dbt.Errorf("no data")
+               }
+       })
+}
+
+// testValuer is a driver.Valuer implementation that always succeeds,
+// yielding its wrapped string.
+type testValuer struct {
+       value string
+}
+
+// Value implements driver.Valuer; it never returns an error.
+func (tv testValuer) Value() (driver.Value, error) {
+       return tv.value, nil
+}
+
+// TestValuer checks that a driver.Valuer parameter is converted to its
+// underlying value on insert and reads back unchanged.
+func TestValuer(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               in := testValuer{"a_value"}
+               var out string
+               var rows *sql.Rows
+
+               dbt.mustExec("CREATE TABLE test (value VARCHAR(255)) CHARACTER SET utf8")
+               dbt.mustExec("INSERT INTO test VALUES (?)", in)
+               rows = dbt.mustQuery("SELECT value FROM test")
+               if rows.Next() {
+                       // report Scan failures instead of comparing a stale out value
+                       if err := rows.Scan(&out); err != nil {
+                               dbt.Errorf("Valuer: %v", err)
+                       }
+                       if in.value != out {
+                               dbt.Errorf("Valuer: %v != %s", in, out)
+                       }
+               } else {
+                       dbt.Errorf("Valuer: no data")
+               }
+               // release the connection held by the result set before DROP TABLE
+               rows.Close()
+
+               dbt.mustExec("DROP TABLE IF EXISTS test")
+       })
+}
+
+// testValuerWithValidation is a driver.Valuer whose Value method fails
+// for an empty string, used to exercise valuer error propagation.
+type testValuerWithValidation struct {
+       value string
+}
+
+// Value implements driver.Valuer. It rejects an empty wrapped string so
+// tests can verify that valuer errors abort statement execution.
+func (tv testValuerWithValidation) Value() (driver.Value, error) {
+       if len(tv.value) == 0 {
+               return nil, fmt.Errorf("Invalid string valuer. Value must not be empty")
+       }
+
+       return tv.value, nil
+}
+
+// TestValuerWithValidation verifies three placeholder conversion paths:
+// a valuer that succeeds, a valuer whose Value() returns an error (the
+// Exec must fail), and a plain nil / non-valuer argument.
+func TestValuerWithValidation(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               in := testValuerWithValidation{"a_value"}
+               var out string
+               var rows *sql.Rows
+
+               dbt.mustExec("CREATE TABLE testValuer (value VARCHAR(255)) CHARACTER SET utf8")
+               dbt.mustExec("INSERT INTO testValuer VALUES (?)", in)
+
+               rows = dbt.mustQuery("SELECT value FROM testValuer")
+               defer rows.Close()
+
+               if rows.Next() {
+                       rows.Scan(&out)
+                       if in.value != out {
+                               dbt.Errorf("Valuer: %v != %s", in, out)
+                       }
+               } else {
+                       dbt.Errorf("Valuer: no data")
+               }
+
+               // empty value makes Value() fail, so the Exec must return an error
+               if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", testValuerWithValidation{""}); err == nil {
+                       dbt.Errorf("Failed to check valuer error")
+               }
+
+               // a literal nil argument is a valid NULL and must not error
+               if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", nil); err != nil {
+                       dbt.Errorf("Failed to check nil")
+               }
+
+               // a map is not a driver.Valuer and cannot be converted
+               if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", map[string]bool{}); err == nil {
+                       dbt.Errorf("Failed to check not valuer")
+               }
+
+               dbt.mustExec("DROP TABLE IF EXISTS testValuer")
+       })
+}
+
+// timeTests groups a MySQL column type, the time.Format layout matching
+// that type's resolution, and the cases run against it.
+type timeTests struct {
+       dbtype  string // e.g. "DATETIME(6)"
+       tlayout string // layout string passed to time.Time.Format
+       tests   []timeTest
+}
+
+// timeTest is one time round-trip case: a string form and/or a time.Time
+// form of the same value.
+type timeTest struct {
+       s string // leading "!": do not use t as value in queries
+       t time.Time // expected/bound time value; zero when only s applies
+}
+
+// timeMode selects how a time value is transferred in the tests; the
+// concrete modes are the binaryString/binaryTime/textString constants.
+type timeMode byte
+
+func (t timeMode) String() string {
+       switch t {
+       case binaryString:
+               return "binary:string"
+       case binaryTime:
+               return "binary:time.Time"
+       case textString:
+               return "text:string"
+       }
+       panic("unsupported timeMode")
+}
+
+func (t timeMode) Binary() bool {
+       switch t {
+       case binaryString, binaryTime:
+               return true
+       }
+       return false
+}
+
+// Transfer modes used by the time tests; see timeMode.String for labels.
+const (
+       binaryString timeMode = iota
+       binaryTime
+       textString
+)
+
+func (t timeTest) genQuery(dbtype string, mode timeMode) string {
+       var inner string
+       if mode.Binary() {
+               inner = "?"
+       } else {
+               inner = `"%s"`
+       }
+       return `SELECT cast(` + inner + ` as ` + dbtype + `)`
+}
+
+// run executes a single time round-trip in the given mode and compares
+// the scanned result against the expected string (text result) or
+// time.Time (parseTime result). Errors are reported via dbt.
+func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
+       var rows *sql.Rows
+       query := t.genQuery(dbtype, mode)
+       switch mode {
+       case binaryString:
+               rows = dbt.mustQuery(query, t.s)
+       case binaryTime:
+               rows = dbt.mustQuery(query, t.t)
+       case textString:
+               // text mode: interpolate the string into the %s slot of the query
+               query = fmt.Sprintf(query, t.s)
+               rows = dbt.mustQuery(query)
+       default:
+               panic("unsupported mode")
+       }
+       defer rows.Close()
+       var err error
+       if !rows.Next() {
+               err = rows.Err()
+               if err == nil {
+                       err = fmt.Errorf("no data")
+               }
+               dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
+               return
+       }
+       // scan into interface{} so the result type reveals whether parseTime
+       // produced a time.Time or the raw []uint8 column bytes
+       var dst interface{}
+       err = rows.Scan(&dst)
+       if err != nil {
+               dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
+               return
+       }
+       switch val := dst.(type) {
+       case []uint8:
+               str := string(val)
+               if str == t.s {
+                       return
+               }
+               if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s {
+                       // a fix mainly for TravisCI:
+                       // accept full microsecond resolution in result for DATETIME columns
+                       // where the binary protocol was used
+                       return
+               }
+               dbt.Errorf("%s [%s] to string: expected %q, got %q",
+                       dbtype, mode,
+                       t.s, str,
+               )
+       case time.Time:
+               if val == t.t {
+                       return
+               }
+               dbt.Errorf("%s [%s] to string: expected %q, got %q",
+                       dbtype, mode,
+                       t.s, val.Format(tlayout),
+               )
+       default:
+               fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
+               dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
+                       dbtype, mode,
+                       val, val,
+               )
+       }
+}
+
+// TestDateTime runs the time round-trip matrix across DATE/DATETIME/TIME
+// column types, with and without parseTime, in text and binary protocol.
+// Fractional-second and zero-date cases are skipped when the server does
+// not support them.
+func TestDateTime(t *testing.T) {
+       afterTime := func(t time.Time, d string) time.Time {
+               dur, err := time.ParseDuration(d)
+               if err != nil {
+                       panic(err)
+               }
+               return t.Add(dur)
+       }
+       // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests
+       format := "2006-01-02 15:04:05.999999"
+       t0 := time.Time{}
+       tstr0 := "0000-00-00 00:00:00.000000"
+       testcases := []timeTests{
+               {"DATE", format[:10], []timeTest{
+                       {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)},
+                       {t: t0, s: tstr0[:10]},
+               }},
+               {"DATETIME", format[:19], []timeTest{
+                       {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
+                       {t: t0, s: tstr0[:19]},
+               }},
+               {"DATETIME(0)", format[:21], []timeTest{
+                       {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
+                       {t: t0, s: tstr0[:19]},
+               }},
+               {"DATETIME(1)", format[:21], []timeTest{
+                       {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)},
+                       {t: t0, s: tstr0[:21]},
+               }},
+               {"DATETIME(6)", format, []timeTest{
+                       {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)},
+                       {t: t0, s: tstr0},
+               }},
+               {"TIME", format[11:19], []timeTest{
+                       {t: afterTime(t0, "12345s")},
+                       {s: "!-12:34:56"},
+                       {s: "!-838:59:59"},
+                       {s: "!838:59:59"},
+                       {t: t0, s: tstr0[11:19]},
+               }},
+               {"TIME(0)", format[11:19], []timeTest{
+                       {t: afterTime(t0, "12345s")},
+                       {s: "!-12:34:56"},
+                       {s: "!-838:59:59"},
+                       {s: "!838:59:59"},
+                       {t: t0, s: tstr0[11:19]},
+               }},
+               {"TIME(1)", format[11:21], []timeTest{
+                       {t: afterTime(t0, "12345600ms")},
+                       {s: "!-12:34:56.7"},
+                       {s: "!-838:59:58.9"},
+                       {s: "!838:59:58.9"},
+                       {t: t0, s: tstr0[11:21]},
+               }},
+               {"TIME(6)", format[11:], []timeTest{
+                       {t: afterTime(t0, "1234567890123000ns")},
+                       {s: "!-12:34:56.789012"},
+                       {s: "!-838:59:58.999999"},
+                       {s: "!838:59:58.999999"},
+                       {t: t0, s: tstr0[11:]},
+               }},
+       }
+       dsns := []string{
+               dsn + "&parseTime=true",
+               dsn + "&parseTime=false",
+       }
+       for _, testdsn := range dsns {
+               runTests(t, testdsn, func(dbt *DBTest) {
+                       microsecsSupported := false
+                       zeroDateSupported := false
+                       var rows *sql.Rows
+                       var err error
+                       rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`)
+                       if err == nil {
+                               // FIX: Rows.Scan is only valid after a successful Rows.Next;
+                               // without it the probe always failed and reported "unsupported"
+                               if rows.Next() {
+                                       rows.Scan(&microsecsSupported)
+                               }
+                               rows.Close()
+                       }
+                       rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`)
+                       if err == nil {
+                               // FIX: same Next-before-Scan requirement as above
+                               if rows.Next() {
+                                       rows.Scan(&zeroDateSupported)
+                               }
+                               rows.Close()
+                       }
+                       for _, setups := range testcases {
+                               if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" {
+                                       // skip fractional second tests if unsupported by server
+                                       continue
+                               }
+                               for _, setup := range setups.tests {
+                                       allowBinTime := true
+                                       if setup.s == "" {
+                                               // fill time string wherever Go can reliable produce it
+                                               setup.s = setup.t.Format(setups.tlayout)
+                                       } else if setup.s[0] == '!' {
+                                               // skip tests using setup.t as source in queries
+                                               allowBinTime = false
+                                               // fix setup.s - remove the "!"
+                                               setup.s = setup.s[1:]
+                                       }
+                                       if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] {
+                                               // skip disallowed 0000-00-00 date
+                                               continue
+                                       }
+                                       setup.run(dbt, setups.dbtype, setups.tlayout, textString)
+                                       setup.run(dbt, setups.dbtype, setups.tlayout, binaryString)
+                                       if allowBinTime {
+                                               setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime)
+                                       }
+                               }
+                       }
+               })
+       }
+}
+
+// TestTimestampMicros verifies TIMESTAMP columns with 0, 1 and 6 digits
+// of fractional-second precision round-trip their default values intact.
+// The test is skipped when the server lacks microsecond support.
+func TestTimestampMicros(t *testing.T) {
+       format := "2006-01-02 15:04:05.999999"
+       f0 := format[:19]
+       f1 := format[:21]
+       f6 := format[:26]
+       runTests(t, dsn, func(dbt *DBTest) {
+               // check if microseconds are supported.
+               // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
+               // and not precision.
+               // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
+               microsecsSupported := false
+               if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil {
+                       // FIX: Rows.Scan requires a prior successful Rows.Next;
+                       // without it the probe always failed and the test was skipped
+                       if rows.Next() {
+                               rows.Scan(&microsecsSupported)
+                       }
+                       rows.Close()
+               }
+               if !microsecsSupported {
+                       // skip test
+                       return
+               }
+               _, err := dbt.db.Exec(`
+                       CREATE TABLE test (
+                               value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
+                               value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
+                               value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
+                       )`,
+               )
+               if err != nil {
+                       dbt.Error(err)
+               }
+               defer dbt.mustExec("DROP TABLE IF EXISTS test")
+               dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
+               var res0, res1, res6 string
+               rows := dbt.mustQuery("SELECT * FROM test")
+               // ensure the result set is closed before the deferred DROP TABLE
+               defer rows.Close()
+               if !rows.Next() {
+                       dbt.Errorf("test contained no selectable values")
+               }
+               err = rows.Scan(&res0, &res1, &res6)
+               if err != nil {
+                       dbt.Error(err)
+               }
+               if res0 != f0 {
+                       dbt.Errorf("expected %q, got %q", f0, res0)
+               }
+               if res1 != f1 {
+                       dbt.Errorf("expected %q, got %q", f1, res1)
+               }
+               if res6 != f6 {
+                       dbt.Errorf("expected %q, got %q", f6, res6)
+               }
+       })
+}
+
+// TestNULL exercises NULL handling in both directions: scanning SELECT
+// NULL / SELECT 1 into the sql.Null* wrapper types and into []byte, and
+// binding nil values as statement parameters.
+func TestNULL(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               nullStmt, err := dbt.db.Prepare("SELECT NULL")
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+               defer nullStmt.Close()
+
+               nonNullStmt, err := dbt.db.Prepare("SELECT 1")
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+               defer nonNullStmt.Close()
+
+               // NullBool
+               var nb sql.NullBool
+               // Invalid
+               if err = nullStmt.QueryRow().Scan(&nb); err != nil {
+                       dbt.Fatal(err)
+               }
+               if nb.Valid {
+                       dbt.Error("valid NullBool which should be invalid")
+               }
+               // Valid
+               if err = nonNullStmt.QueryRow().Scan(&nb); err != nil {
+                       dbt.Fatal(err)
+               }
+               if !nb.Valid {
+                       dbt.Error("invalid NullBool which should be valid")
+               } else if nb.Bool != true {
+                       dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool)
+               }
+
+               // NullFloat64
+               var nf sql.NullFloat64
+               // Invalid
+               if err = nullStmt.QueryRow().Scan(&nf); err != nil {
+                       dbt.Fatal(err)
+               }
+               if nf.Valid {
+                       dbt.Error("valid NullFloat64 which should be invalid")
+               }
+               // Valid
+               if err = nonNullStmt.QueryRow().Scan(&nf); err != nil {
+                       dbt.Fatal(err)
+               }
+               if !nf.Valid {
+                       dbt.Error("invalid NullFloat64 which should be valid")
+               } else if nf.Float64 != float64(1) {
+                       dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
+               }
+
+               // NullInt64
+               var ni sql.NullInt64
+               // Invalid
+               if err = nullStmt.QueryRow().Scan(&ni); err != nil {
+                       dbt.Fatal(err)
+               }
+               if ni.Valid {
+                       dbt.Error("valid NullInt64 which should be invalid")
+               }
+               // Valid
+               if err = nonNullStmt.QueryRow().Scan(&ni); err != nil {
+                       dbt.Fatal(err)
+               }
+               if !ni.Valid {
+                       dbt.Error("invalid NullInt64 which should be valid")
+               } else if ni.Int64 != int64(1) {
+                       dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64)
+               }
+
+               // NullString
+               var ns sql.NullString
+               // Invalid
+               if err = nullStmt.QueryRow().Scan(&ns); err != nil {
+                       dbt.Fatal(err)
+               }
+               if ns.Valid {
+                       dbt.Error("valid NullString which should be invalid")
+               }
+               // Valid
+               if err = nonNullStmt.QueryRow().Scan(&ns); err != nil {
+                       dbt.Fatal(err)
+               }
+               if !ns.Valid {
+                       dbt.Error("invalid NullString which should be valid")
+               } else if ns.String != `1` {
+                       dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)")
+               }
+
+               // nil-bytes: a NULL column must scan to a nil []byte,
+               // a non-NULL column to a non-nil one
+               var b []byte
+               // Read nil
+               if err = nullStmt.QueryRow().Scan(&b); err != nil {
+                       dbt.Fatal(err)
+               }
+               if b != nil {
+                       dbt.Error("non-nil []byte which should be nil")
+               }
+               // Read non-nil
+               if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
+                       dbt.Fatal(err)
+               }
+               if b == nil {
+                       dbt.Error("nil []byte which should be non-nil")
+               }
+               // Insert nil
+               b = nil
+               success := false
+               if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil {
+                       dbt.Fatal(err)
+               }
+               if !success {
+                       dbt.Error("inserting []byte(nil) as NULL failed")
+               }
+               // Check input==output with input==nil
+               b = nil
+               if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
+                       dbt.Fatal(err)
+               }
+               if b != nil {
+                       dbt.Error("non-nil echo from nil input")
+               }
+               // Check input==output with input!=nil
+               b = []byte("")
+               if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
+                       dbt.Fatal(err)
+               }
+               if b == nil {
+                       dbt.Error("nil echo from non-nil input")
+               }
+
+               // Insert NULL
+               dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)")
+
+               dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
+
+               var out interface{}
+               // NOTE(review): rows is never closed and the Scan error is
+               // ignored here; relies on test teardown — confirm acceptable
+               rows := dbt.mustQuery("SELECT * FROM test")
+               if rows.Next() {
+                       rows.Scan(&out)
+                       if out != nil {
+                               dbt.Errorf("%v != nil", out)
+                       }
+               } else {
+                       dbt.Error("no data")
+               }
+       })
+}
+
+// TestUint64 checks that unsigned and signed 64-bit boundary values
+// (zero, all-ones, high-bit patterns) survive a placeholder round trip.
+func TestUint64(t *testing.T) {
+       const (
+               u0    = uint64(0)
+               uall  = ^u0
+               uhigh = uall >> 1
+               utop  = ^uhigh
+               s0    = int64(0)
+               sall  = ^s0
+               shigh = int64(uhigh)
+               stop  = ^shigh
+       )
+       runTests(t, dsn, func(dbt *DBTest) {
+               stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+               defer stmt.Close()
+               row := stmt.QueryRow(
+                       u0, uhigh, utop, uall,
+                       s0, shigh, stop, sall,
+               )
+
+               var ua, ub, uc, ud uint64
+               var sa, sb, sc, sd int64
+
+               if err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd); err != nil {
+                       dbt.Fatal(err)
+               }
+               // every scanned value must equal what was bound
+               if ua != u0 || ub != uhigh || uc != utop || ud != uall ||
+                       sa != s0 || sb != shigh || sc != stop || sd != sall {
+                       dbt.Fatal("unexpected result value")
+               }
+       })
+}
+
+// TestLongData inserts data close to and beyond max_allowed_packet into a
+// LONGBLOB, via literal SQL text and via a bound parameter, and verifies
+// it reads back intact.
+func TestLongData(t *testing.T) {
+       runTests(t, dsn+"&maxAllowedPacket=0", func(dbt *DBTest) {
+               var maxAllowedPacketSize int
+               err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize)
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+               maxAllowedPacketSize--
+
+               // don't get too ambitious
+               if maxAllowedPacketSize > 1<<25 {
+                       maxAllowedPacketSize = 1 << 25
+               }
+
+               dbt.mustExec("CREATE TABLE test (value LONGBLOB)")
+
+               in := strings.Repeat(`a`, maxAllowedPacketSize+1)
+               var out string
+               var rows *sql.Rows
+
+               // Long text data
+               const nonDataQueryLen = 28 // length query w/o value
+               inS := in[:maxAllowedPacketSize-nonDataQueryLen]
+               dbt.mustExec("INSERT INTO test VALUES('" + inS + "')")
+               rows = dbt.mustQuery("SELECT value FROM test")
+               if rows.Next() {
+                       rows.Scan(&out)
+                       if inS != out {
+                               dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
+                       }
+                       if rows.Next() {
+                               dbt.Error("LONGBLOB: unexpected row")
+                       }
+               } else {
+                       dbt.Fatalf("LONGBLOB: no data")
+               }
+               // close the first result set before reusing the connection
+               rows.Close()
+
+               // Empty table
+               dbt.mustExec("TRUNCATE TABLE test")
+
+               // Long binary data
+               dbt.mustExec("INSERT INTO test VALUES(?)", in)
+               rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1)
+               defer rows.Close()
+               if rows.Next() {
+                       rows.Scan(&out)
+                       if in != out {
+                               dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
+                       }
+                       if rows.Next() {
+                               dbt.Error("LONGBLOB: unexpected row")
+                       }
+               } else {
+                       if err = rows.Err(); err != nil {
+                               dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error())
+                       } else {
+                               dbt.Fatal("LONGBLOB: no data (err: <nil>)")
+                       }
+               }
+       })
+}
+
+// TestLoadData exercises LOAD DATA LOCAL INFILE from a registered local
+// file (empty, then populated) and from a registered io.Reader, plus the
+// error paths for unregistered sources.
+func TestLoadData(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               // verifyLoadDataResult checks the table holds exactly the four
+               // expected rows with tab/newline escapes decoded.
+               verifyLoadDataResult := func() {
+                       rows, err := dbt.db.Query("SELECT * FROM test")
+                       if err != nil {
+                               dbt.Fatal(err.Error())
+                       }
+
+                       i := 0
+                       values := [4]string{
+                               "a string",
+                               "a string containing a \t",
+                               "a string containing a \n",
+                               "a string containing both \t\n",
+                       }
+
+                       var id int
+                       var value string
+
+                       for rows.Next() {
+                               i++
+                               err = rows.Scan(&id, &value)
+                               if err != nil {
+                                       dbt.Fatal(err.Error())
+                               }
+                               if i != id {
+                                       dbt.Fatalf("%d != %d", i, id)
+                               }
+                               if values[i-1] != value {
+                                       dbt.Fatalf("%q != %q", values[i-1], value)
+                               }
+                       }
+                       err = rows.Err()
+                       if err != nil {
+                               dbt.Fatal(err.Error())
+                       }
+
+                       if i != 4 {
+                               dbt.Fatalf("rows count mismatch. Got %d, want 4", i)
+                       }
+               }
+
+               dbt.db.Exec("DROP TABLE IF EXISTS test")
+               dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
+
+               // Local File
+               file, err := ioutil.TempFile("", "gotest")
+               // FIX: check the error before touching file — deferring
+               // os.Remove(file.Name()) first would panic on a nil file
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+               defer os.Remove(file.Name())
+               RegisterLocalFile(file.Name())
+
+               // Try first with empty file
+               dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
+               var count int
+               err = dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&count)
+               if err != nil {
+                       dbt.Fatal(err.Error())
+               }
+               if count != 0 {
+                       dbt.Fatalf("unexpected row count: got %d, want 0", count)
+               }
+
+               // Then fill the file with data and try to load it
+               file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
+               file.Close()
+               dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
+               verifyLoadDataResult()
+
+               // Try with non-existing file
+               _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
+               if err == nil {
+                       dbt.Fatal("load non-existent file didn't fail")
+               } else if err.Error() != "local file 'doesnotexist' is not registered" {
+                       dbt.Fatal(err.Error())
+               }
+
+               // Empty table
+               dbt.mustExec("TRUNCATE TABLE test")
+
+               // Reader
+               RegisterReaderHandler("test", func() io.Reader {
+                       file, err = os.Open(file.Name())
+                       if err != nil {
+                               dbt.Fatal(err)
+                       }
+                       return file
+               })
+               dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test")
+               verifyLoadDataResult()
+               // negative test
+               _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
+               if err == nil {
+                       dbt.Fatal("load non-existent Reader didn't fail")
+               } else if err.Error() != "Reader 'doesnotexist' is not registered" {
+                       dbt.Fatal(err.Error())
+               }
+       })
+}
+
+func TestFoundRows(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
+               dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+
+               res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+               count, err := res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 2 {
+                       dbt.Fatalf("Expected 2 affected rows, got %d", count)
+               }
+               res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+               count, err = res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 2 {
+                       dbt.Fatalf("Expected 2 affected rows, got %d", count)
+               }
+       })
+       runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
+               dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+
+               res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+               count, err := res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 2 {
+                       dbt.Fatalf("Expected 2 matched rows, got %d", count)
+               }
+               res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+               count, err = res.RowsAffected()
+               if err != nil {
+                       dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+               }
+               if count != 3 {
+                       dbt.Fatalf("Expected 3 matched rows, got %d", count)
+               }
+       })
+}
+
+func TestTLS(t *testing.T) {
+       tlsTest := func(dbt *DBTest) {
+               if err := dbt.db.Ping(); err != nil {
+                       if err == ErrNoTLS {
+                               dbt.Skip("server does not support TLS")
+                       } else {
+                               dbt.Fatalf("error on Ping: %s", err.Error())
+                       }
+               }
+
+               rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'")
+
+               var variable, value *sql.RawBytes
+               for rows.Next() {
+                       if err := rows.Scan(&variable, &value); err != nil {
+                               dbt.Fatal(err.Error())
+                       }
+
+                       if value == nil {
+                               dbt.Fatal("no Cipher")
+                       }
+               }
+       }
+
+       runTests(t, dsn+"&tls=skip-verify", tlsTest)
+
+       // Verify that registering / using a custom cfg works
+       RegisterTLSConfig("custom-skip-verify", &tls.Config{
+               InsecureSkipVerify: true,
+       })
+       runTests(t, dsn+"&tls=custom-skip-verify", tlsTest)
+}
+
// TestReuseClosedConnection makes sure that executing a prepared
// statement on an already-closed connection does not panic; it may
// only return driver.ErrBadConn.
func TestReuseClosedConnection(t *testing.T) {
	// this test does not use sql.database, it uses the driver directly
	if !available {
		t.Skipf("MySQL server not running on %s", netAddr)
	}

	md := &MySQLDriver{}
	conn, err := md.Open(dsn)
	if err != nil {
		t.Fatalf("error connecting: %s", err.Error())
	}
	stmt, err := conn.Prepare("DO 1")
	if err != nil {
		t.Fatalf("error preparing statement: %s", err.Error())
	}
	_, err = stmt.Exec(nil)
	if err != nil {
		t.Fatalf("error executing statement: %s", err.Error())
	}
	err = conn.Close()
	if err != nil {
		t.Fatalf("error closing connection: %s", err.Error())
	}

	// A panic below would mean the driver dereferenced state of the
	// closed connection; recover turns that into a test failure.
	defer func() {
		if err := recover(); err != nil {
			t.Errorf("panic after reusing a closed connection: %v", err)
		}
	}()
	_, err = stmt.Exec(nil)
	if err != nil && err != driver.ErrBadConn {
		t.Errorf("unexpected error '%s', expected '%s'",
			err.Error(), driver.ErrBadConn.Error())
	}
}
+
+func TestCharset(t *testing.T) {
+       if !available {
+               t.Skipf("MySQL server not running on %s", netAddr)
+       }
+
+       mustSetCharset := func(charsetParam, expected string) {
+               runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) {
+                       rows := dbt.mustQuery("SELECT @@character_set_connection")
+                       defer rows.Close()
+
+                       if !rows.Next() {
+                               dbt.Fatalf("error getting connection charset: %s", rows.Err())
+                       }
+
+                       var got string
+                       rows.Scan(&got)
+
+                       if got != expected {
+                               dbt.Fatalf("expected connection charset %s but got %s", expected, got)
+                       }
+               })
+       }
+
+       // non utf8 test
+       mustSetCharset("charset=ascii", "ascii")
+
+       // when the first charset is invalid, use the second
+       mustSetCharset("charset=none,utf8", "utf8")
+
+       // when the first charset is valid, use it
+       mustSetCharset("charset=ascii,utf8", "ascii")
+       mustSetCharset("charset=utf8,ascii", "utf8")
+}
+
+func TestFailingCharset(t *testing.T) {
+       runTests(t, dsn+"&charset=none", func(dbt *DBTest) {
+               // run query to really establish connection...
+               _, err := dbt.db.Exec("SELECT 1")
+               if err == nil {
+                       dbt.db.Close()
+                       t.Fatalf("connection must not succeed without a valid charset")
+               }
+       })
+}
+
+func TestCollation(t *testing.T) {
+       if !available {
+               t.Skipf("MySQL server not running on %s", netAddr)
+       }
+
+       defaultCollation := "utf8_general_ci"
+       testCollations := []string{
+               "",               // do not set
+               defaultCollation, // driver default
+               "latin1_general_ci",
+               "binary",
+               "utf8_unicode_ci",
+               "cp1257_bin",
+       }
+
+       for _, collation := range testCollations {
+               var expected, tdsn string
+               if collation != "" {
+                       tdsn = dsn + "&collation=" + collation
+                       expected = collation
+               } else {
+                       tdsn = dsn
+                       expected = defaultCollation
+               }
+
+               runTests(t, tdsn, func(dbt *DBTest) {
+                       var got string
+                       if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil {
+                               dbt.Fatal(err)
+                       }
+
+                       if got != expected {
+                               dbt.Fatalf("expected connection collation %s but got %s", expected, got)
+                       }
+               })
+       }
+}
+
+func TestColumnsWithAlias(t *testing.T) {
+       runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
+               rows := dbt.mustQuery("SELECT 1 AS A")
+               defer rows.Close()
+               cols, _ := rows.Columns()
+               if len(cols) != 1 {
+                       t.Fatalf("expected 1 column, got %d", len(cols))
+               }
+               if cols[0] != "A" {
+                       t.Fatalf("expected column name \"A\", got \"%s\"", cols[0])
+               }
+               rows.Close()
+
+               rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A")
+               cols, _ = rows.Columns()
+               if len(cols) != 1 {
+                       t.Fatalf("expected 1 column, got %d", len(cols))
+               }
+               if cols[0] != "A.one" {
+                       t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0])
+               }
+       })
+}
+
+func TestRawBytesResultExceedsBuffer(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               // defaultBufSize from buffer.go
+               expected := strings.Repeat("abc", defaultBufSize)
+
+               rows := dbt.mustQuery("SELECT '" + expected + "'")
+               defer rows.Close()
+               if !rows.Next() {
+                       dbt.Error("expected result, got none")
+               }
+               var result sql.RawBytes
+               rows.Scan(&result)
+               if expected != string(result) {
+                       dbt.Error("result did not match expected value")
+               }
+       })
+}
+
+func TestTimezoneConversion(t *testing.T) {
+       zones := []string{"UTC", "US/Central", "US/Pacific", "Local"}
+
+       // Regression test for timezone handling
+       tzTest := func(dbt *DBTest) {
+               // Create table
+               dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)")
+
+               // Insert local time into database (should be converted)
+               usCentral, _ := time.LoadLocation("US/Central")
+               reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral)
+               dbt.mustExec("INSERT INTO test VALUE (?)", reftime)
+
+               // Retrieve time from DB
+               rows := dbt.mustQuery("SELECT ts FROM test")
+               if !rows.Next() {
+                       dbt.Fatal("did not get any rows out")
+               }
+
+               var dbTime time.Time
+               err := rows.Scan(&dbTime)
+               if err != nil {
+                       dbt.Fatal("Err", err)
+               }
+
+               // Check that dates match
+               if reftime.Unix() != dbTime.Unix() {
+                       dbt.Errorf("times do not match.\n")
+                       dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime)
+                       dbt.Errorf(" Now(UTC)=%v\n", dbTime)
+               }
+       }
+
+       for _, tz := range zones {
+               runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest)
+       }
+}
+
+// Special cases
+
+func TestRowsClose(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               rows, err := dbt.db.Query("SELECT 1")
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               err = rows.Close()
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               if rows.Next() {
+                       dbt.Fatal("unexpected row after rows.Close()")
+               }
+
+               err = rows.Err()
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+       })
+}
+
// dangling statements
// http://code.google.com/p/go/issues/detail?id=3865
//
// TestCloseStmtBeforeRows checks that rows obtained from a prepared
// statement remain readable after the statement itself was closed.
func TestCloseStmtBeforeRows(t *testing.T) {
	runTests(t, dsn, func(dbt *DBTest) {
		stmt, err := dbt.db.Prepare("SELECT 1")
		if err != nil {
			dbt.Fatal(err)
		}

		rows, err := stmt.Query()
		if err != nil {
			stmt.Close()
			dbt.Fatal(err)
		}
		defer rows.Close()

		// Close the statement while its rows are still open.
		err = stmt.Close()
		if err != nil {
			dbt.Fatal(err)
		}

		// The row must still be readable.
		if !rows.Next() {
			dbt.Fatal("getting row failed")
		} else {
			err = rows.Err()
			if err != nil {
				dbt.Fatal(err)
			}

			var out bool
			err = rows.Scan(&out)
			if err != nil {
				dbt.Fatalf("error on rows.Scan(): %s", err.Error())
			}
			if out != true {
				dbt.Errorf("true != %t", out)
			}
		}
	})
}
+
// It is valid to have multiple Rows for the same Stmt
// http://code.google.com/p/go/issues/detail?id=3734
//
// TestStmtMultiRows interleaves reads from two result sets obtained
// from one prepared statement ("SELECT 1 UNION SELECT 0") and checks
// that each yields 1 then 0 independently of the other.
func TestStmtMultiRows(t *testing.T) {
	runTests(t, dsn, func(dbt *DBTest) {
		stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0")
		if err != nil {
			dbt.Fatal(err)
		}

		rows1, err := stmt.Query()
		if err != nil {
			stmt.Close()
			dbt.Fatal(err)
		}
		defer rows1.Close()

		rows2, err := stmt.Query()
		if err != nil {
			stmt.Close()
			dbt.Fatal(err)
		}
		defer rows2.Close()

		var out bool

		// 1
		if !rows1.Next() {
			dbt.Fatal("first rows1.Next failed")
		} else {
			err = rows1.Err()
			if err != nil {
				dbt.Fatal(err)
			}

			err = rows1.Scan(&out)
			if err != nil {
				dbt.Fatalf("error on rows.Scan(): %s", err.Error())
			}
			if out != true {
				dbt.Errorf("true != %t", out)
			}
		}

		if !rows2.Next() {
			dbt.Fatal("first rows2.Next failed")
		} else {
			err = rows2.Err()
			if err != nil {
				dbt.Fatal(err)
			}

			err = rows2.Scan(&out)
			if err != nil {
				dbt.Fatalf("error on rows.Scan(): %s", err.Error())
			}
			if out != true {
				dbt.Errorf("true != %t", out)
			}
		}

		// 2
		if !rows1.Next() {
			dbt.Fatal("second rows1.Next failed")
		} else {
			err = rows1.Err()
			if err != nil {
				dbt.Fatal(err)
			}

			err = rows1.Scan(&out)
			if err != nil {
				dbt.Fatalf("error on rows.Scan(): %s", err.Error())
			}
			if out != false {
				dbt.Errorf("false != %t", out)
			}

			// rows1 must now be exhausted.
			if rows1.Next() {
				dbt.Fatal("unexpected row on rows1")
			}
			err = rows1.Close()
			if err != nil {
				dbt.Fatal(err)
			}
		}

		if !rows2.Next() {
			dbt.Fatal("second rows2.Next failed")
		} else {
			err = rows2.Err()
			if err != nil {
				dbt.Fatal(err)
			}

			err = rows2.Scan(&out)
			if err != nil {
				dbt.Fatalf("error on rows.Scan(): %s", err.Error())
			}
			if out != false {
				dbt.Errorf("false != %t", out)
			}

			// rows2 must now be exhausted as well.
			if rows2.Next() {
				dbt.Fatal("unexpected row on rows2")
			}
			err = rows2.Close()
			if err != nil {
				dbt.Fatal(err)
			}
		}
	})
}
+
// Regression test for
// * more than 32 NULL parameters (issue 209)
// * more parameters than fit into the buffer (issue 201)
// * parameters * 64 > max_allowed_packet (issue 734)
func TestPreparedManyCols(t *testing.T) {
	// 65535 — presumably the protocol's placeholder maximum (uint16);
	// confirm against the prepared-statement wire format if relied upon.
	numParams := 65535
	runTests(t, dsn, func(dbt *DBTest) {
		query := "SELECT ?" + strings.Repeat(",?", numParams-1)
		stmt, err := dbt.db.Prepare(query)
		if err != nil {
			dbt.Fatal(err)
		}
		defer stmt.Close()

		// create more parameters than fit into the buffer
		// which will take nil-values
		params := make([]interface{}, numParams)
		rows, err := stmt.Query(params...)
		if err != nil {
			dbt.Fatal(err)
		}
		rows.Close()

		// Create 0byte string which we can't send via STMT_LONG_DATA.
		for i := 0; i < numParams; i++ {
			params[i] = ""
		}
		rows, err = stmt.Query(params...)
		if err != nil {
			dbt.Fatal(err)
		}
		rows.Close()
	})
}
+
+func TestConcurrent(t *testing.T) {
+       if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled {
+               t.Skip("MYSQL_TEST_CONCURRENT env var not set")
+       }
+
+       runTests(t, dsn, func(dbt *DBTest) {
+               var max int
+               err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
+               if err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+               dbt.Logf("testing up to %d concurrent connections \r\n", max)
+
+               var remaining, succeeded int32 = int32(max), 0
+
+               var wg sync.WaitGroup
+               wg.Add(max)
+
+               var fatalError string
+               var once sync.Once
+               fatalf := func(s string, vals ...interface{}) {
+                       once.Do(func() {
+                               fatalError = fmt.Sprintf(s, vals...)
+                       })
+               }
+
+               for i := 0; i < max; i++ {
+                       go func(id int) {
+                               defer wg.Done()
+
+                               tx, err := dbt.db.Begin()
+                               atomic.AddInt32(&remaining, -1)
+
+                               if err != nil {
+                                       if err.Error() != "Error 1040: Too many connections" {
+                                               fatalf("error on conn %d: %s", id, err.Error())
+                                       }
+                                       return
+                               }
+
+                               // keep the connection busy until all connections are open
+                               for remaining > 0 {
+                                       if _, err = tx.Exec("DO 1"); err != nil {
+                                               fatalf("error on conn %d: %s", id, err.Error())
+                                               return
+                                       }
+                               }
+
+                               if err = tx.Commit(); err != nil {
+                                       fatalf("error on conn %d: %s", id, err.Error())
+                                       return
+                               }
+
+                               // everything went fine with this connection
+                               atomic.AddInt32(&succeeded, 1)
+                       }(i)
+               }
+
+               // wait until all conections are open
+               wg.Wait()
+
+               if fatalError != "" {
+                       dbt.Fatal(fatalError)
+               }
+
+               dbt.Logf("reached %d concurrent connections\r\n", succeeded)
+       })
+}
+
// Tests custom dial functions
//
// TestCustomDial registers a dial function under the network name
// "mydial" and checks that a DSN using that name connects through it.
func TestCustomDial(t *testing.T) {
	if !available {
		t.Skipf("MySQL server not running on %s", netAddr)
	}

	// our custom dial function which just wraps net.Dial here
	RegisterDial("mydial", func(addr string) (net.Conn, error) {
		return net.Dial(prot, addr)
	})

	// The DSN network segment "mydial" selects the registered function.
	db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
	if err != nil {
		t.Fatalf("error connecting: %s", err.Error())
	}
	defer db.Close()

	if _, err = db.Exec("DO 1"); err != nil {
		t.Fatalf("connection failed: %s", err.Error())
	}
}
+
+func TestSQLInjection(t *testing.T) {
+       createTest := func(arg string) func(dbt *DBTest) {
+               return func(dbt *DBTest) {
+                       dbt.mustExec("CREATE TABLE test (v INTEGER)")
+                       dbt.mustExec("INSERT INTO test VALUES (?)", 1)
+
+                       var v int
+                       // NULL can't be equal to anything, the idea here is to inject query so it returns row
+                       // This test verifies that escapeQuotes and escapeBackslash are working properly
+                       err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
+                       if err == sql.ErrNoRows {
+                               return // success, sql injection failed
+                       } else if err == nil {
+                               dbt.Errorf("sql injection successful with arg: %s", arg)
+                       } else {
+                               dbt.Errorf("error running query with arg: %s; err: %s", arg, err.Error())
+                       }
+               }
+       }
+
+       dsns := []string{
+               dsn,
+               dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
+       }
+       for _, testdsn := range dsns {
+               runTests(t, testdsn, createTest("1 OR 1=1"))
+               runTests(t, testdsn, createTest("' OR '1'='1"))
+       }
+}
+
+// Test if inserted data is correctly retrieved after being escaped
+func TestInsertRetrieveEscapedData(t *testing.T) {
+       testData := func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v VARCHAR(255))")
+
+               // All sequences that are escaped by escapeQuotes and escapeBackslash
+               v := "foo \x00\n\r\x1a\"'\\"
+               dbt.mustExec("INSERT INTO test VALUES (?)", v)
+
+               var out string
+               err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out)
+               if err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+
+               if out != v {
+                       dbt.Errorf("%q != %q", out, v)
+               }
+       }
+
+       dsns := []string{
+               dsn,
+               dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
+       }
+       for _, testdsn := range dsns {
+               runTests(t, testdsn, testData)
+       }
+}
+
// TestUnixSocketAuthFail connects over the unix socket with a wrong
// password and verifies that the resulting auth failure is returned to
// the caller without being written to the driver's logger.
func TestUnixSocketAuthFail(t *testing.T) {
	runTests(t, dsn, func(dbt *DBTest) {
		// Save the current logger so we can restore it.
		oldLogger := errLog

		// Set a new logger so we can capture its output.
		buffer := bytes.NewBuffer(make([]byte, 0, 64))
		newLogger := log.New(buffer, "prefix: ", 0)
		SetLogger(newLogger)

		// Restore the logger.
		defer SetLogger(oldLogger)

		// Make a new DSN that uses the MySQL socket file and a bad password, which
		// we can make by simply appending any character to the real password.
		badPass := pass + "x"
		socket := ""
		if prot == "unix" {
			socket = addr
		} else {
			// Get socket file from MySQL.
			err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket)
			if err != nil {
				t.Fatalf("error on SELECT @@socket: %s", err.Error())
			}
		}
		t.Logf("socket: %s", socket)
		badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s", user, badPass, socket, dbname)
		db, err := sql.Open("mysql", badDSN)
		if err != nil {
			t.Fatalf("error connecting: %s", err.Error())
		}
		defer db.Close()

		// Connect to MySQL for real. This will cause an auth failure.
		err = db.Ping()
		if err == nil {
			t.Error("expected Ping() to return an error")
		}

		// The driver should not log anything.
		if actual := buffer.String(); actual != "" {
			t.Errorf("expected no output, got %q", actual)
		}
	})
}
+
+// See Issue #422
+func TestInterruptBySignal(t *testing.T) {
+       runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec(`
+                       DROP PROCEDURE IF EXISTS test_signal;
+                       CREATE PROCEDURE test_signal(ret INT)
+                       BEGIN
+                               SELECT ret;
+                               SIGNAL SQLSTATE
+                                       '45001'
+                               SET
+                                       MESSAGE_TEXT = "an error",
+                                       MYSQL_ERRNO = 45001;
+                       END
+               `)
+               defer dbt.mustExec("DROP PROCEDURE test_signal")
+
+               var val int
+
+               // text protocol
+               rows, err := dbt.db.Query("CALL test_signal(42)")
+               if err != nil {
+                       dbt.Fatalf("error on text query: %s", err.Error())
+               }
+               for rows.Next() {
+                       if err := rows.Scan(&val); err != nil {
+                               dbt.Error(err)
+                       } else if val != 42 {
+                               dbt.Errorf("expected val to be 42")
+                       }
+               }
+
+               // binary protocol
+               rows, err = dbt.db.Query("CALL test_signal(?)", 42)
+               if err != nil {
+                       dbt.Fatalf("error on binary query: %s", err.Error())
+               }
+               for rows.Next() {
+                       if err := rows.Scan(&val); err != nil {
+                               dbt.Error(err)
+                       } else if val != 42 {
+                               dbt.Errorf("expected val to be 42")
+                       }
+               }
+       })
+}
+
+func TestColumnsReusesSlice(t *testing.T) {
+       rows := mysqlRows{
+               rs: resultSet{
+                       columns: []mysqlField{
+                               {
+                                       tableName: "test",
+                                       name:      "A",
+                               },
+                               {
+                                       tableName: "test",
+                                       name:      "B",
+                               },
+                       },
+               },
+       }
+
+       allocs := testing.AllocsPerRun(1, func() {
+               cols := rows.Columns()
+
+               if len(cols) != 2 {
+                       t.Fatalf("expected 2 columns, got %d", len(cols))
+               }
+       })
+
+       if allocs != 0 {
+               t.Fatalf("expected 0 allocations, got %d", int(allocs))
+       }
+
+       if rows.rs.columnNames == nil {
+               t.Fatalf("expected columnNames to be set, got nil")
+       }
+}
+
// TestRejectReadOnly documents the effect of the rejectReadOnly DSN
// option: without it, writes in a read-only session fail; with it, the
// driver discards the read-only connection so `database/sql` retries
// on a fresh (read-write) one.
func TestRejectReadOnly(t *testing.T) {
	runTests(t, dsn, func(dbt *DBTest) {
		// Create Table
		dbt.mustExec("CREATE TABLE test (value BOOL)")
		// Set the session to read-only. We didn't set the `rejectReadOnly`
		// option, so any writes after this should fail.
		_, err := dbt.db.Exec("SET SESSION TRANSACTION READ ONLY")
		// Error 1193: Unknown system variable 'TRANSACTION' => skip test,
		// MySQL server version is too old
		maybeSkip(t, err, 1193)
		if _, err := dbt.db.Exec("DROP TABLE test"); err == nil {
			t.Fatalf("writing to DB in read-only session without " +
				"rejectReadOnly did not error")
		}
		// Set the session back to read-write so runTests() can properly clean
		// up the table `test`.
		dbt.mustExec("SET SESSION TRANSACTION READ WRITE")
	})

	// Enable the `rejectReadOnly` option.
	runTests(t, dsn+"&rejectReadOnly=true", func(dbt *DBTest) {
		// Create Table
		dbt.mustExec("CREATE TABLE test (value BOOL)")
		// Set the session to read only. Any writes after this should error on
		// a driver.ErrBadConn, and cause `database/sql` to initiate a new
		// connection.
		dbt.mustExec("SET SESSION TRANSACTION READ ONLY")
		// This would error, but `database/sql` should automatically retry on a
		// new connection which is not read-only, and eventually succeed.
		dbt.mustExec("DROP TABLE test")
	})
}
+
+func TestPing(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               if err := dbt.db.Ping(); err != nil {
+                       dbt.fail("Ping", "Ping", err)
+               }
+       })
+}
+
// See Issue #799
//
// TestEmptyPassword connects with an empty password. This must succeed
// when the configured test account really has no password, and must
// fail with an access-denied error ("Error 1045" prefix) otherwise.
func TestEmptyPassword(t *testing.T) {
	if !available {
		t.Skipf("MySQL server not running on %s", netAddr)
	}

	dsn := fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, "", netAddr, dbname)
	db, err := sql.Open("mysql", dsn)
	if err == nil {
		defer db.Close()
		// sql.Open is lazy; Ping forces the actual handshake.
		err = db.Ping()
	}

	if pass == "" {
		if err != nil {
			t.Fatal(err.Error())
		}
	} else {
		if err == nil {
			t.Fatal("expected authentication error")
		}
		if !strings.HasPrefix(err.Error(), "Error 1045") {
			t.Fatal(err.Error())
		}
	}
}
+
+// Compile-time checks that mysqlConn satisfies the context-aware
+// driver interfaces. Typed-nil conversions avoid composite literals.
+var (
+       _ driver.ConnBeginTx        = (*mysqlConn)(nil)
+       _ driver.ConnPrepareContext = (*mysqlConn)(nil)
+       _ driver.ExecerContext      = (*mysqlConn)(nil)
+       _ driver.Pinger             = (*mysqlConn)(nil)
+       _ driver.QueryerContext     = (*mysqlConn)(nil)
+)
+
+// Compile-time checks that mysqlStmt satisfies the context-aware
+// statement interfaces.
+var (
+       _ driver.StmtExecContext  = (*mysqlStmt)(nil)
+       _ driver.StmtQueryContext = (*mysqlStmt)(nil)
+)
+
+// Compile-time checks that both row implementations expose the
+// column-type metadata and multi-result-set interfaces.
+var (
+       // _ driver.RowsColumnTypeLength        = (*binaryRows)(nil)
+       // _ driver.RowsColumnTypeLength        = (*textRows)(nil)
+       _ driver.RowsColumnTypeDatabaseTypeName = (*binaryRows)(nil)
+       _ driver.RowsColumnTypeDatabaseTypeName = (*textRows)(nil)
+       _ driver.RowsColumnTypeNullable         = (*binaryRows)(nil)
+       _ driver.RowsColumnTypeNullable         = (*textRows)(nil)
+       _ driver.RowsColumnTypePrecisionScale   = (*binaryRows)(nil)
+       _ driver.RowsColumnTypePrecisionScale   = (*textRows)(nil)
+       _ driver.RowsColumnTypeScanType         = (*binaryRows)(nil)
+       _ driver.RowsColumnTypeScanType         = (*textRows)(nil)
+       _ driver.RowsNextResultSet              = (*binaryRows)(nil)
+       _ driver.RowsNextResultSet              = (*textRows)(nil)
+)
+
+// TestMultiResultSet exercises multi-statement queries and stored procedures
+// that return several result sets, including one that is deliberately skipped
+// via NextResultSet.
+func TestMultiResultSet(t *testing.T) {
+       type result struct {
+               values  [][]int
+               columns []string
+       }
+
+       // checkRows is a helper test function to validate rows containing 3 result
+       // sets with specific values and columns. The basic query would look like this:
+       //
+       // SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+       // SELECT 0 UNION SELECT 1;
+       // SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
+       //
+       // to distinguish test cases the first string argument is put in front of
+       // every error or fatal message.
+       checkRows := func(desc string, rows *sql.Rows, dbt *DBTest) {
+               expected := []result{
+                       {
+                               values:  [][]int{{1, 2}, {3, 4}},
+                               columns: []string{"col1", "col2"},
+                       },
+                       {
+                               values:  [][]int{{1, 2, 3}, {4, 5, 6}},
+                               columns: []string{"col1", "col2", "col3"},
+                       },
+               }
+
+               var res1 result
+               for rows.Next() {
+                       var res [2]int
+                       if err := rows.Scan(&res[0], &res[1]); err != nil {
+                               // Include desc here too; this was the only message in
+                               // the helper that omitted the test-case prefix.
+                               dbt.Fatal(desc, err)
+                       }
+                       res1.values = append(res1.values, res[:])
+               }
+
+               cols, err := rows.Columns()
+               if err != nil {
+                       dbt.Fatal(desc, err)
+               }
+               res1.columns = cols
+
+               if !reflect.DeepEqual(expected[0], res1) {
+                       dbt.Error(desc, "want =", expected[0], "got =", res1)
+               }
+
+               if !rows.NextResultSet() {
+                       dbt.Fatal(desc, "expected next result set")
+               }
+
+               // ignoring one result set
+
+               if !rows.NextResultSet() {
+                       dbt.Fatal(desc, "expected next result set")
+               }
+
+               var res2 result
+               cols, err = rows.Columns()
+               if err != nil {
+                       dbt.Fatal(desc, err)
+               }
+               res2.columns = cols
+
+               for rows.Next() {
+                       var res [3]int
+                       if err := rows.Scan(&res[0], &res[1], &res[2]); err != nil {
+                               dbt.Fatal(desc, err)
+                       }
+                       res2.values = append(res2.values, res[:])
+               }
+
+               if !reflect.DeepEqual(expected[1], res2) {
+                       dbt.Error(desc, "want =", expected[1], "got =", res2)
+               }
+
+               if rows.NextResultSet() {
+                       dbt.Error(desc, "unexpected next result set")
+               }
+
+               if err := rows.Err(); err != nil {
+                       dbt.Error(desc, err)
+               }
+       }
+
+       runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+               rows := dbt.mustQuery(`DO 1;
+               SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+               DO 1;
+               SELECT 0 UNION SELECT 1;
+               SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;`)
+               defer rows.Close()
+               checkRows("query: ", rows, dbt)
+       })
+
+       runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+               queries := []string{
+                       `
+                       DROP PROCEDURE IF EXISTS test_mrss;
+                       CREATE PROCEDURE test_mrss()
+                       BEGIN
+                               DO 1;
+                               SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+                               DO 1;
+                               SELECT 0 UNION SELECT 1;
+                               SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
+                       END
+               `,
+                       `
+                       DROP PROCEDURE IF EXISTS test_mrss;
+                       CREATE PROCEDURE test_mrss()
+                       BEGIN
+                               SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+                               SELECT 0 UNION SELECT 1;
+                               SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
+                       END
+               `,
+               }
+
+               defer dbt.mustExec("DROP PROCEDURE IF EXISTS test_mrss")
+
+               for i, query := range queries {
+                       dbt.mustExec(query)
+
+                       stmt, err := dbt.db.Prepare("CALL test_mrss()")
+                       if err != nil {
+                               dbt.Fatalf("%v (i=%d)", err, i)
+                       }
+
+                       for j := 0; j < 2; j++ {
+                               rows, err := stmt.Query()
+                               if err != nil {
+                                       stmt.Close()
+                                       dbt.Fatalf("%v (i=%d) (j=%d)", err, i, j)
+                               }
+                               checkRows(fmt.Sprintf("prepared stmt query (i=%d) (j=%d): ", i, j), rows, dbt)
+                       }
+                       // Close each statement at the end of its own iteration; the
+                       // previous `defer stmt.Close()` inside the loop kept every
+                       // statement open until the whole test function returned.
+                       stmt.Close()
+               }
+       })
+}
+
+func TestMultiResultSetNoSelect(t *testing.T) {
+       runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+               rows := dbt.mustQuery("DO 1; DO 2;")
+               defer rows.Close()
+
+               if rows.Next() {
+                       dbt.Error("unexpected row")
+               }
+
+               if rows.NextResultSet() {
+                       dbt.Error("unexpected next result set")
+               }
+
+               if err := rows.Err(); err != nil {
+                       dbt.Error("expected nil; got ", err)
+               }
+       })
+}
+
+// tests if rows are set in a proper state if some results were ignored before
+// calling rows.NextResultSet.
+func TestSkipResults(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               rows := dbt.mustQuery("SELECT 1, 2")
+               defer rows.Close()
+
+               if !rows.Next() {
+                       dbt.Error("expected row")
+               }
+
+               if rows.NextResultSet() {
+                       dbt.Error("unexpected next result set")
+               }
+
+               if err := rows.Err(); err != nil {
+                       dbt.Error("expected nil; got ", err)
+               }
+       })
+}
+
+func TestPingContext(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               ctx, cancel := context.WithCancel(context.Background())
+               cancel()
+               if err := dbt.db.PingContext(ctx); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+       })
+}
+
+// TestContextCancelExec checks that canceling the context mid-query aborts
+// ExecContext promptly, and that an already-canceled context errors before
+// a new query is sent at all.
+func TestContextCancelExec(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               ctx, cancel := context.WithCancel(context.Background())
+
+               // Delay execution for just a bit until db.ExecContext has begun.
+               defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
+
+               // This query will be canceled.
+               startTime := time.Now()
+               if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+               // Cancellation should interrupt the 1-second SLEEP well before it
+               // finishes; 500ms leaves headroom over the 250ms cancel delay.
+               if d := time.Since(startTime); d > 500*time.Millisecond {
+                       dbt.Errorf("too long execution time: %s", d)
+               }
+
+               // Wait for the INSERT query to be done.
+               time.Sleep(time.Second)
+
+               // Check how many times the query is executed.
+               var v int
+               if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+               if v != 1 { // TODO: need to kill the query, and v should be 0.
+                       dbt.Skipf("[WARN] expected val to be 1, got %d", v)
+               }
+
+               // Context is already canceled, so error should come before execution.
+               // NOTE(review): compared by message rather than `err != context.Canceled`
+               // as in the sibling tests — presumably the error is wrapped on this
+               // path; confirm before unifying.
+               if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
+                       dbt.Error("expected error")
+               } else if err.Error() != "context canceled" {
+                       dbt.Fatalf("unexpected error: %s", err)
+               }
+
+               // The second insert query will fail, so the table has no changes.
+               if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+               if v != 1 {
+                       dbt.Skipf("[WARN] expected val to be 1, got %d", v)
+               }
+       })
+}
+
+// TestContextCancelQuery mirrors TestContextCancelExec for the QueryContext
+// path: cancellation mid-query aborts promptly, and a canceled context fails
+// before a new query runs.
+func TestContextCancelQuery(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               ctx, cancel := context.WithCancel(context.Background())
+
+               // Delay execution for just a bit until db.QueryContext has begun.
+               defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
+
+               // This query will be canceled.
+               startTime := time.Now()
+               if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+               // 500ms leaves headroom over the 250ms cancel delay.
+               if d := time.Since(startTime); d > 500*time.Millisecond {
+                       dbt.Errorf("too long execution time: %s", d)
+               }
+
+               // Wait for the INSERT query to be done.
+               time.Sleep(time.Second)
+
+               // Check how many times the query is executed.
+               var v int
+               if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+               if v != 1 { // TODO: need to kill the query, and v should be 0.
+                       dbt.Skipf("[WARN] expected val to be 1, got %d", v)
+               }
+
+               // Context is already canceled, so error should come before execution.
+               if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+
+               // The second insert query will fail, so the table has no changes.
+               if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+               if v != 1 {
+                       dbt.Skipf("[WARN] expected val to be 1, got %d", v)
+               }
+       })
+}
+
+// TestContextCancelQueryRow checks that canceling a context while iterating
+// rows ends the iteration and surfaces context.Canceled via rows.Err.
+func TestContextCancelQueryRow(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
+               ctx, cancel := context.WithCancel(context.Background())
+
+               rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
+               if err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+
+               // The first row should be fetched successfully before cancellation.
+               var v int
+               if !rows.Next() {
+                       dbt.Fatalf("unexpected end")
+               }
+               if err := rows.Scan(&v); err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+
+               cancel()
+               // make sure the driver receives the cancel request.
+               time.Sleep(100 * time.Millisecond)
+
+               // After cancellation, iteration must stop and report the cause.
+               if rows.Next() {
+                       dbt.Errorf("expected end, but not")
+               }
+               if err := rows.Err(); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+       })
+}
+
+func TestContextCancelPrepare(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               ctx, cancel := context.WithCancel(context.Background())
+               cancel()
+               if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+       })
+}
+
+// TestContextCancelStmtExec checks that canceling the context mid-execution
+// aborts a prepared statement's ExecContext promptly.
+func TestContextCancelStmtExec(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               ctx, cancel := context.WithCancel(context.Background())
+               // NOTE(review): stmt is never closed; likely harmless in a test,
+               // but worth confirming against the sibling tests.
+               stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+               if err != nil {
+                       dbt.Fatalf("unexpected error: %v", err)
+               }
+
+               // Delay execution for just a bit until stmt.ExecContext has begun.
+               defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
+
+               // This query will be canceled.
+               startTime := time.Now()
+               if _, err := stmt.ExecContext(ctx); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+               // 500ms leaves headroom over the 250ms cancel delay.
+               if d := time.Since(startTime); d > 500*time.Millisecond {
+                       dbt.Errorf("too long execution time: %s", d)
+               }
+
+               // Wait for the INSERT query to be done.
+               time.Sleep(time.Second)
+
+               // Check how many times the query is executed.
+               var v int
+               if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+               if v != 1 { // TODO: need to kill the query, and v should be 0.
+                       dbt.Skipf("[WARN] expected val to be 1, got %d", v)
+               }
+       })
+}
+
+// TestContextCancelStmtQuery mirrors TestContextCancelStmtExec for the
+// QueryContext path of a prepared statement.
+func TestContextCancelStmtQuery(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               ctx, cancel := context.WithCancel(context.Background())
+               stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+               if err != nil {
+                       dbt.Fatalf("unexpected error: %v", err)
+               }
+
+               // Delay execution for just a bit until stmt.QueryContext has begun.
+               defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
+
+               // This query will be canceled.
+               startTime := time.Now()
+               if _, err := stmt.QueryContext(ctx); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+               // 500ms leaves headroom over the 250ms cancel delay.
+               if d := time.Since(startTime); d > 500*time.Millisecond {
+                       dbt.Errorf("too long execution time: %s", d)
+               }
+
+               // Wait for the INSERT query to be done.
+               time.Sleep(time.Second)
+
+               // Check how many times the query is executed.
+               var v int
+               if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+                       dbt.Fatalf("%s", err.Error())
+               }
+               if v != 1 { // TODO: need to kill the query, and v should be 0.
+                       dbt.Skipf("[WARN] expected val to be 1, got %d", v)
+               }
+       })
+}
+
+// TestContextCancelBegin checks that canceling a context aborts work inside
+// a transaction, causes Commit to fail, and blocks starting new transactions.
+func TestContextCancelBegin(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               ctx, cancel := context.WithCancel(context.Background())
+               tx, err := dbt.db.BeginTx(ctx, nil)
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               // Delay execution for just a bit until tx.ExecContext has begun.
+               defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
+
+               // This query will be canceled.
+               startTime := time.Now()
+               if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+               if d := time.Since(startTime); d > 500*time.Millisecond {
+                       dbt.Errorf("too long execution time: %s", d)
+               }
+
+               // Transaction is canceled, so expect an error.
+               switch err := tx.Commit(); err {
+               case sql.ErrTxDone:
+                       // because the transaction has already been rolled back.
+                       // the database/sql package watches ctx
+                       // and rolls back when ctx is canceled.
+               case context.Canceled:
+                       // the database/sql package rolls back on another goroutine,
+                       // so the rollback may not have happened yet depending on
+                       // goroutine scheduling.
+               default:
+                       dbt.Errorf("expected sql.ErrTxDone or context.Canceled, got %v", err)
+               }
+
+               // Context is canceled, so cannot begin a transaction.
+               if _, err := dbt.db.BeginTx(ctx, nil); err != context.Canceled {
+                       dbt.Errorf("expected context.Canceled, got %v", err)
+               }
+       })
+}
+
+// TestContextBeginIsolationLevel verifies that per-transaction isolation
+// levels passed via BeginTx take effect: a READ COMMITTED reader must not
+// see another transaction's uncommitted write, but must see it once committed.
+func TestContextBeginIsolationLevel(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               ctx, cancel := context.WithCancel(context.Background())
+               defer cancel()
+
+               // tx1 is the writer.
+               tx1, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
+                       Isolation: sql.LevelRepeatableRead,
+               })
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               // tx2 is the reader.
+               tx2, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
+                       Isolation: sql.LevelReadCommitted,
+               })
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               _, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               var v int
+               row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+               if err := row.Scan(&v); err != nil {
+                       dbt.Fatal(err)
+               }
+               // The writer transaction hasn't committed yet, so its row must
+               // not be visible to the READ COMMITTED reader.
+               if v != 0 {
+                       dbt.Errorf("expected val to be 0, got %d", v)
+               }
+
+               err = tx1.Commit()
+               if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+               if err := row.Scan(&v); err != nil {
+                       dbt.Fatal(err)
+               }
+               // Data written by the writer transaction is now committed, so it
+               // should be selectable by the reader.
+               if v != 1 {
+                       dbt.Errorf("expected val to be 1, got %d", v)
+               }
+               // Best-effort cleanup; the assertions above are already done.
+               tx2.Commit()
+       })
+}
+
+// TestContextBeginReadOnly verifies that a transaction started with
+// ReadOnly: true rejects writes but still allows SELECTs. Servers without
+// READ ONLY transaction support cause the test to be skipped.
+func TestContextBeginReadOnly(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (v INTEGER)")
+               ctx, cancel := context.WithCancel(context.Background())
+               defer cancel()
+
+               tx, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
+                       ReadOnly: true,
+               })
+               if _, ok := err.(*MySQLError); ok {
+                       dbt.Skip("It seems that your MySQL does not support READ ONLY transactions")
+                       // NOTE(review): Skip presumably ends the test; the explicit
+                       // return looks defensive — confirm.
+                       return
+               } else if err != nil {
+                       dbt.Fatal(err)
+               }
+
+               // INSERT queries fail in a READ ONLY transaction.
+               _, err = tx.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+               if _, ok := err.(*MySQLError); !ok {
+                       dbt.Errorf("expected MySQLError, got %v", err)
+               }
+
+               // SELECT queries can be executed.
+               var v int
+               row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+               if err := row.Scan(&v); err != nil {
+                       dbt.Fatal(err)
+               }
+               // The rejected INSERT must not have changed anything.
+               if v != 0 {
+                       dbt.Errorf("expected val to be 0, got %d", v)
+               }
+
+               if err := tx.Commit(); err != nil {
+                       dbt.Fatal(err)
+               }
+       })
+}
+
+func TestRowsColumnTypes(t *testing.T) {
+       niNULL := sql.NullInt64{Int64: 0, Valid: false}
+       ni0 := sql.NullInt64{Int64: 0, Valid: true}
+       ni1 := sql.NullInt64{Int64: 1, Valid: true}
+       ni42 := sql.NullInt64{Int64: 42, Valid: true}
+       nfNULL := sql.NullFloat64{Float64: 0.0, Valid: false}
+       nf0 := sql.NullFloat64{Float64: 0.0, Valid: true}
+       nf1337 := sql.NullFloat64{Float64: 13.37, Valid: true}
+       nt0 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC), Valid: true}
+       nt1 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 100000000, time.UTC), Valid: true}
+       nt2 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 110000000, time.UTC), Valid: true}
+       nt6 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 111111000, time.UTC), Valid: true}
+       nd1 := NullTime{Time: time.Date(2006, 01, 02, 0, 0, 0, 0, time.UTC), Valid: true}
+       nd2 := NullTime{Time: time.Date(2006, 03, 04, 0, 0, 0, 0, time.UTC), Valid: true}
+       ndNULL := NullTime{Time: time.Time{}, Valid: false}
+       rbNULL := sql.RawBytes(nil)
+       rb0 := sql.RawBytes("0")
+       rb42 := sql.RawBytes("42")
+       rbTest := sql.RawBytes("Test")
+       rb0pad4 := sql.RawBytes("0\x00\x00\x00") // BINARY right-pads values with 0x00
+       rbx0 := sql.RawBytes("\x00")
+       rbx42 := sql.RawBytes("\x42")
+
+       var columns = []struct {
+               name             string
+               fieldType        string // type used when creating table schema
+               databaseTypeName string // actual type used by MySQL
+               scanType         reflect.Type
+               nullable         bool
+               precision        int64 // 0 if not ok
+               scale            int64
+               valuesIn         [3]string
+               valuesOut        [3]interface{}
+       }{
+               {"bit8null", "BIT(8)", "BIT", scanTypeRawBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]interface{}{rbx0, rbNULL, rbx42}},
+               {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]interface{}{niNULL, ni1, ni0}},
+               {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]interface{}{int8(1), int8(0), int8(0)}},
+               {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
+               {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]interface{}{int16(0), int16(-32768), int16(32767)}},
+               {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
+               {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
+               {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]interface{}{int32(0), int32(-1337), int32(42)}},
+               {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]interface{}{ni0, ni42, niNULL}},
+               {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]interface{}{int64(0), int64(65535), int64(-42)}},
+               {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]interface{}{niNULL, ni1, ni42}},
+               {"tinyuint", "TINYINT UNSIGNED NOT NULL", "TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]interface{}{uint8(0), uint8(255), uint8(42)}},
+               {"smalluint", "SMALLINT UNSIGNED NOT NULL", "SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint16(0), uint16(65535), uint16(42)}},
+               {"biguint", "BIGINT UNSIGNED NOT NULL", "BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint64(0), uint64(65535), uint64(42)}},
+               {"uint13", "INT(13) UNSIGNED NOT NULL", "INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]interface{}{uint32(0), uint32(1337), uint32(42)}},
+               {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float32(0), float32(42), float32(13.37)}},
+               {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
+               {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
+               {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float64(0), float64(42), float64(13.37)}},
+               {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
+               {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), sql.RawBytes("13.370000"), sql.RawBytes("1234.123456")}},
+               {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeRawBytes, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), rbNULL, sql.RawBytes("1234.123456")}},
+               {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), sql.RawBytes("13.3700"), sql.RawBytes("1234.1235")}},
+               {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeRawBytes, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), rbNULL, sql.RawBytes("1234.1235")}},
+               {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]interface{}{rb0, sql.RawBytes("13"), sql.RawBytes("-12345")}},
+               {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeRawBytes, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]interface{}{rb0, rbNULL, sql.RawBytes("-12345")}},
+               {"char25null", "CHAR(25)", "CHAR", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+               {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+               {"binary4null", "BINARY(4)", "BINARY", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0pad4, rbNULL, rbTest}},
+               {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+               {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+               {"tinytextnull", "TINYTEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+               {"blobnull", "BLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+               {"textnull", "TEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+               {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+               {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+               {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+               {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+               {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt0, nt0}},
+               {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt2}},
+               {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt6}},
+               {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]interface{}{nd1, ndNULL, nd2}},
+               {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]interface{}{uint16(2006), uint16(2000), uint16(1994)}},
+       }
+
+       schema := ""
+       values1 := ""
+       values2 := ""
+       values3 := ""
+       for _, column := range columns {
+               schema += fmt.Sprintf("`%s` %s, ", column.name, column.fieldType)
+               values1 += column.valuesIn[0] + ", "
+               values2 += column.valuesIn[1] + ", "
+               values3 += column.valuesIn[2] + ", "
+       }
+       schema = schema[:len(schema)-2]
+       values1 = values1[:len(values1)-2]
+       values2 = values2[:len(values2)-2]
+       values3 = values3[:len(values3)-2]
+
+       dsns := []string{
+               dsn + "&parseTime=true",
+               dsn + "&parseTime=false",
+       }
+       for _, testdsn := range dsns {
+               runTests(t, testdsn, func(dbt *DBTest) {
+                       dbt.mustExec("CREATE TABLE test (" + schema + ")")
+                       dbt.mustExec("INSERT INTO test VALUES (" + values1 + "), (" + values2 + "), (" + values3 + ")")
+
+                       rows, err := dbt.db.Query("SELECT * FROM test")
+                       if err != nil {
+                               t.Fatalf("Query: %v", err)
+                       }
+
+                       tt, err := rows.ColumnTypes()
+                       if err != nil {
+                               t.Fatalf("ColumnTypes: %v", err)
+                       }
+
+                       if len(tt) != len(columns) {
+                               t.Fatalf("unexpected number of columns: expected %d, got %d", len(columns), len(tt))
+                       }
+
+                       types := make([]reflect.Type, len(tt))
+                       for i, tp := range tt {
+                               column := columns[i]
+
+                               // Name
+                               name := tp.Name()
+                               if name != column.name {
+                                       t.Errorf("column name mismatch %s != %s", name, column.name)
+                                       continue
+                               }
+
+                               // DatabaseTypeName
+                               databaseTypeName := tp.DatabaseTypeName()
+                               if databaseTypeName != column.databaseTypeName {
+                                       t.Errorf("databasetypename name mismatch for column %q: %s != %s", name, databaseTypeName, column.databaseTypeName)
+                                       continue
+                               }
+
+                               // ScanType
+                               scanType := tp.ScanType()
+                               if scanType != column.scanType {
+                                       if scanType == nil {
+                                               t.Errorf("scantype is null for column %q", name)
+                                       } else {
+                                               t.Errorf("scantype mismatch for column %q: %s != %s", name, scanType.Name(), column.scanType.Name())
+                                       }
+                                       continue
+                               }
+                               types[i] = scanType
+
+                               // Nullable
+                               nullable, ok := tp.Nullable()
+                               if !ok {
+                                       t.Errorf("nullable not ok %q", name)
+                                       continue
+                               }
+                               if nullable != column.nullable {
+                                       t.Errorf("nullable mismatch for column %q: %t != %t", name, nullable, column.nullable)
+                               }
+
+                               // Length
+                               // length, ok := tp.Length()
+                               // if length != column.length {
+                               //      if !ok {
+                               //              t.Errorf("length not ok for column %q", name)
+                               //      } else {
+                               //              t.Errorf("length mismatch for column %q: %d != %d", name, length, column.length)
+                               //      }
+                               //      continue
+                               // }
+
+                               // Precision and Scale
+                               precision, scale, ok := tp.DecimalSize()
+                               if precision != column.precision {
+                                       if !ok {
+                                               t.Errorf("precision not ok for column %q", name)
+                                       } else {
+                                               t.Errorf("precision mismatch for column %q: %d != %d", name, precision, column.precision)
+                                       }
+                                       continue
+                               }
+                               if scale != column.scale {
+                                       if !ok {
+                                               t.Errorf("scale not ok for column %q", name)
+                                       } else {
+                                               t.Errorf("scale mismatch for column %q: %d != %d", name, scale, column.scale)
+                                       }
+                                       continue
+                               }
+                       }
+
+                       values := make([]interface{}, len(tt))
+                       for i := range values {
+                               values[i] = reflect.New(types[i]).Interface()
+                       }
+                       i := 0
+                       for rows.Next() {
+                               err = rows.Scan(values...)
+                               if err != nil {
+                                       t.Fatalf("failed to scan values in %v", err)
+                               }
+                               for j := range values {
+                                       value := reflect.ValueOf(values[j]).Elem().Interface()
+                                       if !reflect.DeepEqual(value, columns[j].valuesOut[i]) {
+                                               if columns[j].scanType == scanTypeRawBytes {
+                                                       t.Errorf("row %d, column %d: %v != %v", i, j, string(value.(sql.RawBytes)), string(columns[j].valuesOut[i].(sql.RawBytes)))
+                                               } else {
+                                                       t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
+                                               }
+                                       }
+                               }
+                               i++
+                       }
+                       if i != 3 {
+                               t.Errorf("expected 3 rows, got %d", i)
+                       }
+
+                       if err := rows.Close(); err != nil {
+                               t.Errorf("error closing rows: %s", err)
+                       }
+               })
+       }
+}
+
+func TestValuerWithValueReceiverGivenNilValue(t *testing.T) {
+       runTests(t, dsn, func(dbt *DBTest) {
+               dbt.mustExec("CREATE TABLE test (value VARCHAR(255))")
+               dbt.db.Exec("INSERT INTO test VALUES (?)", (*testValuer)(nil))
+               // This test will panic on the INSERT if ConvertValue() does not check for typed nil before calling Value()
+       })
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644 (file)
index 0000000..be014ba
--- /dev/null
@@ -0,0 +1,611 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "crypto/rsa"
+       "crypto/tls"
+       "errors"
+       "fmt"
+       "net"
+       "net/url"
+       "sort"
+       "strconv"
+       "strings"
+       "time"
+)
+
// Errors returned by ParseDSN when the DSN string is malformed.
var (
	errInvalidDSNUnescaped       = errors.New("invalid DSN: did you forget to escape a param value?")
	errInvalidDSNAddr            = errors.New("invalid DSN: network address not terminated (missing closing brace)")
	errInvalidDSNNoSlash         = errors.New("invalid DSN: missing the slash separating the database name")
	errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
)
+
// Config is a configuration parsed from a DSN string.
// If a new Config is created instead of being parsed from a DSN string,
// the NewConfig function should be used, which sets default values.
type Config struct {
	User             string            // Username
	Passwd           string            // Password (requires User)
	Net              string            // Network type
	Addr             string            // Network address (requires Net)
	DBName           string            // Database name
	Params           map[string]string // Connection parameters
	Collation        string            // Connection collation
	Loc              *time.Location    // Location for time.Time values
	MaxAllowedPacket int               // Max packet size allowed
	ServerPubKey     string            // Server public key name
	pubKey           *rsa.PublicKey    // Server public key (resolved from ServerPubKey during DSN parsing)
	TLSConfig        string            // TLS configuration name
	tls              *tls.Config       // TLS configuration (resolved from TLSConfig during DSN parsing)
	Timeout          time.Duration     // Dial timeout
	ReadTimeout      time.Duration     // I/O read timeout
	WriteTimeout     time.Duration     // I/O write timeout

	// Boolean feature flags, each settable via the DSN parameter of the
	// same (lowerCamelCase) name; see parseDSNParams.
	AllowAllFiles           bool // Allow all files to be used with LOAD DATA LOCAL INFILE
	AllowCleartextPasswords bool // Allows the cleartext client side plugin
	AllowNativePasswords    bool // Allows the native password authentication method
	AllowOldPasswords       bool // Allows the old insecure password method
	ClientFoundRows         bool // Return number of matching rows instead of rows changed
	ColumnsWithAlias        bool // Prepend table alias to column names
	InterpolateParams       bool // Interpolate placeholders into query string
	MultiStatements         bool // Allow multiple statements in one query
	ParseTime               bool // Parse time values to time.Time
	RejectReadOnly          bool // Reject read-only connections
}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+       return &Config{
+               Collation:            defaultCollation,
+               Loc:                  time.UTC,
+               MaxAllowedPacket:     defaultMaxAllowedPacket,
+               AllowNativePasswords: true,
+       }
+}
+
+func (cfg *Config) normalize() error {
+       if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+               return errInvalidDSNUnsafeCollation
+       }
+
+       // Set default network if empty
+       if cfg.Net == "" {
+               cfg.Net = "tcp"
+       }
+
+       // Set default address if empty
+       if cfg.Addr == "" {
+               switch cfg.Net {
+               case "tcp":
+                       cfg.Addr = "127.0.0.1:3306"
+               case "unix":
+                       cfg.Addr = "/tmp/mysql.sock"
+               default:
+                       return errors.New("default addr for network '" + cfg.Net + "' unknown")
+               }
+
+       } else if cfg.Net == "tcp" {
+               cfg.Addr = ensureHavePort(cfg.Addr)
+       }
+
+       if cfg.tls != nil {
+               if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
+                       host, _, err := net.SplitHostPort(cfg.Addr)
+                       if err == nil {
+                               cfg.tls.ServerName = host
+                       }
+               }
+       }
+
+       return nil
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+       var buf bytes.Buffer
+
+       // [username[:password]@]
+       if len(cfg.User) > 0 {
+               buf.WriteString(cfg.User)
+               if len(cfg.Passwd) > 0 {
+                       buf.WriteByte(':')
+                       buf.WriteString(cfg.Passwd)
+               }
+               buf.WriteByte('@')
+       }
+
+       // [protocol[(address)]]
+       if len(cfg.Net) > 0 {
+               buf.WriteString(cfg.Net)
+               if len(cfg.Addr) > 0 {
+                       buf.WriteByte('(')
+                       buf.WriteString(cfg.Addr)
+                       buf.WriteByte(')')
+               }
+       }
+
+       // /dbname
+       buf.WriteByte('/')
+       buf.WriteString(cfg.DBName)
+
+       // [?param1=value1&...&paramN=valueN]
+       hasParam := false
+
+       if cfg.AllowAllFiles {
+               hasParam = true
+               buf.WriteString("?allowAllFiles=true")
+       }
+
+       if cfg.AllowCleartextPasswords {
+               if hasParam {
+                       buf.WriteString("&allowCleartextPasswords=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?allowCleartextPasswords=true")
+               }
+       }
+
+       if !cfg.AllowNativePasswords {
+               if hasParam {
+                       buf.WriteString("&allowNativePasswords=false")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?allowNativePasswords=false")
+               }
+       }
+
+       if cfg.AllowOldPasswords {
+               if hasParam {
+                       buf.WriteString("&allowOldPasswords=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?allowOldPasswords=true")
+               }
+       }
+
+       if cfg.ClientFoundRows {
+               if hasParam {
+                       buf.WriteString("&clientFoundRows=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?clientFoundRows=true")
+               }
+       }
+
+       if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+               if hasParam {
+                       buf.WriteString("&collation=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?collation=")
+               }
+               buf.WriteString(col)
+       }
+
+       if cfg.ColumnsWithAlias {
+               if hasParam {
+                       buf.WriteString("&columnsWithAlias=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?columnsWithAlias=true")
+               }
+       }
+
+       if cfg.InterpolateParams {
+               if hasParam {
+                       buf.WriteString("&interpolateParams=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?interpolateParams=true")
+               }
+       }
+
+       if cfg.Loc != time.UTC && cfg.Loc != nil {
+               if hasParam {
+                       buf.WriteString("&loc=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?loc=")
+               }
+               buf.WriteString(url.QueryEscape(cfg.Loc.String()))
+       }
+
+       if cfg.MultiStatements {
+               if hasParam {
+                       buf.WriteString("&multiStatements=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?multiStatements=true")
+               }
+       }
+
+       if cfg.ParseTime {
+               if hasParam {
+                       buf.WriteString("&parseTime=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?parseTime=true")
+               }
+       }
+
+       if cfg.ReadTimeout > 0 {
+               if hasParam {
+                       buf.WriteString("&readTimeout=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?readTimeout=")
+               }
+               buf.WriteString(cfg.ReadTimeout.String())
+       }
+
+       if cfg.RejectReadOnly {
+               if hasParam {
+                       buf.WriteString("&rejectReadOnly=true")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?rejectReadOnly=true")
+               }
+       }
+
+       if len(cfg.ServerPubKey) > 0 {
+               if hasParam {
+                       buf.WriteString("&serverPubKey=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?serverPubKey=")
+               }
+               buf.WriteString(url.QueryEscape(cfg.ServerPubKey))
+       }
+
+       if cfg.Timeout > 0 {
+               if hasParam {
+                       buf.WriteString("&timeout=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?timeout=")
+               }
+               buf.WriteString(cfg.Timeout.String())
+       }
+
+       if len(cfg.TLSConfig) > 0 {
+               if hasParam {
+                       buf.WriteString("&tls=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?tls=")
+               }
+               buf.WriteString(url.QueryEscape(cfg.TLSConfig))
+       }
+
+       if cfg.WriteTimeout > 0 {
+               if hasParam {
+                       buf.WriteString("&writeTimeout=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?writeTimeout=")
+               }
+               buf.WriteString(cfg.WriteTimeout.String())
+       }
+
+       if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
+               if hasParam {
+                       buf.WriteString("&maxAllowedPacket=")
+               } else {
+                       hasParam = true
+                       buf.WriteString("?maxAllowedPacket=")
+               }
+               buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
+
+       }
+
+       // other params
+       if cfg.Params != nil {
+               var params []string
+               for param := range cfg.Params {
+                       params = append(params, param)
+               }
+               sort.Strings(params)
+               for _, param := range params {
+                       if hasParam {
+                               buf.WriteByte('&')
+                       } else {
+                               hasParam = true
+                               buf.WriteByte('?')
+                       }
+
+                       buf.WriteString(param)
+                       buf.WriteByte('=')
+                       buf.WriteString(url.QueryEscape(cfg.Params[param]))
+               }
+       }
+
+       return buf.String()
+}
+
// ParseDSN parses the DSN string to a Config
//
// Format: [user[:password]@][net[(addr)]]/dbname[?param1=value1&...&paramN=valueN]
// The string is scanned right-to-left from the last '/', because both the
// password and the network address may themselves contain a '/'. Defaults
// are applied via NewConfig and cfg.normalize.
func ParseDSN(dsn string) (cfg *Config, err error) {
	// New config with some default values
	cfg = NewConfig()

	// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
	// Find the last '/' (since the password or the net addr might contain a '/')
	foundSlash := false
	for i := len(dsn) - 1; i >= 0; i-- {
		if dsn[i] == '/' {
			foundSlash = true
			var j, k int

			// left part is empty if i <= 0
			if i > 0 {
				// [username[:password]@][protocol[(address)]]
				// Find the last '@' in dsn[:i]
				for j = i; j >= 0; j-- {
					if dsn[j] == '@' {
						// username[:password]
						// Find the first ':' in dsn[:j]
						for k = 0; k < j; k++ {
							if dsn[k] == ':' {
								cfg.Passwd = dsn[k+1 : j]
								break
							}
						}
						// If no ':' was found, k ended at j, so the whole
						// left part becomes the username.
						cfg.User = dsn[:k]

						break
					}
				}

				// [protocol[(address)]]
				// Find the first '(' in dsn[j+1:i]
				// (if no '@' was found above, j is -1, so the scan starts at 0)
				for k = j + 1; k < i; k++ {
					if dsn[k] == '(' {
						// dsn[i-1] must be == ')' if an address is specified
						if dsn[i-1] != ')' {
							if strings.ContainsRune(dsn[k+1:i], ')') {
								return nil, errInvalidDSNUnescaped
							}
							return nil, errInvalidDSNAddr
						}
						cfg.Addr = dsn[k+1 : i-1]
						break
					}
				}
				// Without parentheses k stopped at i, so Net spans up to the slash.
				cfg.Net = dsn[j+1 : k]
			}

			// dbname[?param1=value1&...&paramN=valueN]
			// Find the first '?' in dsn[i+1:]
			for j = i + 1; j < len(dsn); j++ {
				if dsn[j] == '?' {
					if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
						return
					}
					break
				}
			}
			// If no '?' exists, j is len(dsn), so this takes the full tail.
			cfg.DBName = dsn[i+1 : j]

			break
		}
	}

	if !foundSlash && len(dsn) > 0 {
		return nil, errInvalidDSNNoSlash
	}

	if err = cfg.normalize(); err != nil {
		return nil, err
	}
	return
}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+       for _, v := range strings.Split(params, "&") {
+               param := strings.SplitN(v, "=", 2)
+               if len(param) != 2 {
+                       continue
+               }
+
+               // cfg params
+               switch value := param[1]; param[0] {
+               // Disable INFILE whitelist / enable all files
+               case "allowAllFiles":
+                       var isBool bool
+                       cfg.AllowAllFiles, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Use cleartext authentication mode (MySQL 5.5.10+)
+               case "allowCleartextPasswords":
+                       var isBool bool
+                       cfg.AllowCleartextPasswords, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Use native password authentication
+               case "allowNativePasswords":
+                       var isBool bool
+                       cfg.AllowNativePasswords, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Use old authentication mode (pre MySQL 4.1)
+               case "allowOldPasswords":
+                       var isBool bool
+                       cfg.AllowOldPasswords, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Switch "rowsAffected" mode
+               case "clientFoundRows":
+                       var isBool bool
+                       cfg.ClientFoundRows, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Collation
+               case "collation":
+                       cfg.Collation = value
+                       break
+
+               case "columnsWithAlias":
+                       var isBool bool
+                       cfg.ColumnsWithAlias, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Compression
+               case "compress":
+                       return errors.New("compression not implemented yet")
+
+               // Enable client side placeholder substitution
+               case "interpolateParams":
+                       var isBool bool
+                       cfg.InterpolateParams, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Time Location
+               case "loc":
+                       if value, err = url.QueryUnescape(value); err != nil {
+                               return
+                       }
+                       cfg.Loc, err = time.LoadLocation(value)
+                       if err != nil {
+                               return
+                       }
+
+               // multiple statements in one query
+               case "multiStatements":
+                       var isBool bool
+                       cfg.MultiStatements, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // time.Time parsing
+               case "parseTime":
+                       var isBool bool
+                       cfg.ParseTime, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // I/O read Timeout
+               case "readTimeout":
+                       cfg.ReadTimeout, err = time.ParseDuration(value)
+                       if err != nil {
+                               return
+                       }
+
+               // Reject read-only connections
+               case "rejectReadOnly":
+                       var isBool bool
+                       cfg.RejectReadOnly, isBool = readBool(value)
+                       if !isBool {
+                               return errors.New("invalid bool value: " + value)
+                       }
+
+               // Server public key
+               case "serverPubKey":
+                       name, err := url.QueryUnescape(value)
+                       if err != nil {
+                               return fmt.Errorf("invalid value for server pub key name: %v", err)
+                       }
+
+                       if pubKey := getServerPubKey(name); pubKey != nil {
+                               cfg.ServerPubKey = name
+                               cfg.pubKey = pubKey
+                       } else {
+                               return errors.New("invalid value / unknown server pub key name: " + name)
+                       }
+
+               // Strict mode
+               case "strict":
+                       panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
+               // Dial Timeout
+               case "timeout":
+                       cfg.Timeout, err = time.ParseDuration(value)
+                       if err != nil {
+                               return
+                       }
+
+               // TLS-Encryption
+               case "tls":
+                       boolValue, isBool := readBool(value)
+                       if isBool {
+                               if boolValue {
+                                       cfg.TLSConfig = "true"
+                                       cfg.tls = &tls.Config{}
+                               } else {
+                                       cfg.TLSConfig = "false"
+                               }
+                       } else if vl := strings.ToLower(value); vl == "skip-verify" {
+                               cfg.TLSConfig = vl
+                               cfg.tls = &tls.Config{InsecureSkipVerify: true}
+                       } else {
+                               name, err := url.QueryUnescape(value)
+                               if err != nil {
+                                       return fmt.Errorf("invalid value for TLS config name: %v", err)
+                               }
+
+                               if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
+                                       cfg.TLSConfig = name
+                                       cfg.tls = tlsConfig
+                               } else {
+                                       return errors.New("invalid value / unknown config name: " + name)
+                               }
+                       }
+
+               // I/O write Timeout
+               case "writeTimeout":
+                       cfg.WriteTimeout, err = time.ParseDuration(value)
+                       if err != nil {
+                               return
+                       }
+               case "maxAllowedPacket":
+                       cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+                       if err != nil {
+                               return
+                       }
+               default:
+                       // lazy init
+                       if cfg.Params == nil {
+                               cfg.Params = make(map[string]string)
+                       }
+
+                       if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+                               return
+                       }
+               }
+       }
+
+       return
+}
+
// ensureHavePort returns addr unchanged when it already contains a port,
// and otherwise appends the default MySQL port 3306.
func ensureHavePort(addr string) string {
	_, _, err := net.SplitHostPort(addr)
	if err == nil {
		return addr
	}
	return net.JoinHostPort(addr, "3306")
}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
new file mode 100644 (file)
index 0000000..1cd0954
--- /dev/null
@@ -0,0 +1,331 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "crypto/tls"
+       "fmt"
+       "net/url"
+       "reflect"
+       "testing"
+       "time"
+)
+
// testDSNs maps DSN input strings to the exact Config that ParseDSN is
// expected to produce. Shared by TestDSNParser, TestDSNReformat and
// BenchmarkParseDSN.
var testDSNs = []struct {
	in  string
	out *Config
}{{
	"username:password@protocol(address)/dbname?param=value",
	&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
	&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true},
}, {
	"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
	&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true, MultiStatements: true},
}, {
	"user@unix(/path/to/socket)/dbname?charset=utf8",
	&Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
	&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "true"},
}, {
	"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
	&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "skip-verify"},
}, {
	"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216",
	&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216},
}, {
	"user:password@/dbname?allowNativePasswords=false&maxAllowedPacket=0",
	&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false},
}, {
	"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
	&Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"/dbname",
	&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"@/",
	&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"/",
	&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"",
	&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"user:p@/ssword@/",
	&Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"unix/?arg=%2Fsome%2Fpath.ext",
	&Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"tcp(127.0.0.1)/dbname",
	&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
	"tcp(de:ad:be:ef::ca:fe)/dbname",
	&Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
},
}
+
+func TestDSNParser(t *testing.T) {
+       for i, tst := range testDSNs {
+               cfg, err := ParseDSN(tst.in)
+               if err != nil {
+                       t.Error(err.Error())
+               }
+
+               // pointer not static
+               cfg.tls = nil
+
+               if !reflect.DeepEqual(cfg, tst.out) {
+                       t.Errorf("%d. ParseDSN(%q) mismatch:\ngot  %+v\nwant %+v", i, tst.in, cfg, tst.out)
+               }
+       }
+}
+
+func TestDSNParserInvalid(t *testing.T) {
+       var invalidDSNs = []string{
+               "@net(addr/",                  // no closing brace
+               "@tcp(/",                      // no closing brace
+               "tcp(/",                       // no closing brace
+               "(/",                          // no closing brace
+               "net(addr)//",                 // unescaped
+               "User:pass@tcp(1.2.3.4:3306)", // no trailing slash
+               "net()/",                      // unknown default addr
+               //"/dbname?arg=/some/unescaped/path",
+       }
+
+       for i, tst := range invalidDSNs {
+               if _, err := ParseDSN(tst); err == nil {
+                       t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
+               }
+       }
+}
+
// TestDSNReformat verifies the ParseDSN / Config.FormatDSN round-trip:
// formatting a parsed config and re-parsing the output must yield an
// equivalent config (compared via their %+v renderings).
func TestDSNReformat(t *testing.T) {
	for i, tst := range testDSNs {
		dsn1 := tst.in
		cfg1, err := ParseDSN(dsn1)
		if err != nil {
			t.Error(err.Error())
			continue
		}
		cfg1.tls = nil // pointer not static
		res1 := fmt.Sprintf("%+v", cfg1)

		dsn2 := cfg1.FormatDSN()
		cfg2, err := ParseDSN(dsn2)
		if err != nil {
			t.Error(err.Error())
			continue
		}
		cfg2.tls = nil // pointer not static
		res2 := fmt.Sprintf("%+v", cfg2)

		if res1 != res2 {
			t.Errorf("%d. %q does not match %q", i, res2, res1)
		}
	}
}
+
// TestDSNServerPubKey verifies that serverPubKey=<name> resolves a key
// previously registered via RegisterServerPubKey (setting both the name and
// the pubKey pointer on the Config), and that an unregistered name makes
// ParseDSN fail.
func TestDSNServerPubKey(t *testing.T) {
	baseDSN := "User:password@tcp(localhost:5555)/dbname?serverPubKey="

	RegisterServerPubKey("testKey", testPubKeyRSA)
	defer DeregisterServerPubKey("testKey")

	tst := baseDSN + "testKey"
	cfg, err := ParseDSN(tst)
	if err != nil {
		t.Error(err.Error())
	}

	if cfg.ServerPubKey != "testKey" {
		t.Errorf("unexpected cfg.ServerPubKey value: %v", cfg.ServerPubKey)
	}
	if cfg.pubKey != testPubKeyRSA {
		t.Error("pub key pointer doesn't match")
	}

	// Key is missing: lookup of an unregistered name must be an error.
	tst = baseDSN + "invalid_name"
	cfg, err = ParseDSN(tst)
	if err == nil {
		t.Errorf("invalid name in DSN (%s) but did not error. Got config: %#v", tst, cfg)
	}
}
+
+func TestDSNServerPubKeyQueryEscape(t *testing.T) {
+       const name = "&%!:"
+       dsn := "User:password@tcp(localhost:5555)/dbname?serverPubKey=" + url.QueryEscape(name)
+
+       RegisterServerPubKey(name, testPubKeyRSA)
+       defer DeregisterServerPubKey(name)
+
+       cfg, err := ParseDSN(dsn)
+       if err != nil {
+               t.Error(err.Error())
+       }
+
+       if cfg.pubKey != testPubKeyRSA {
+               t.Error("pub key pointer doesn't match")
+       }
+}
+
// TestDSNWithCustomTLS checks tls=<name> lookup of configs registered via
// RegisterTLSConfig: an unknown name is an error, and a registered config is
// cloned on parse — the ServerName is taken from the registered config when
// set, defaulted to the DSN host when empty, and the registered struct
// itself is never mutated.
//
// NOTE: the sub-cases below deliberately mutate the shared tlsCfg between
// ParseDSN calls, so their order matters.
func TestDSNWithCustomTLS(t *testing.T) {
	baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
	tlsCfg := tls.Config{}

	RegisterTLSConfig("utils_test", &tlsCfg)
	defer DeregisterTLSConfig("utils_test")

	// Custom TLS is missing
	tst := baseDSN + "invalid_tls"
	cfg, err := ParseDSN(tst)
	if err == nil {
		t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
	}

	tst = baseDSN + "utils_test"

	// Custom TLS with a server name
	name := "foohost"
	tlsCfg.ServerName = name
	cfg, err = ParseDSN(tst)

	if err != nil {
		t.Error(err.Error())
	} else if cfg.tls.ServerName != name {
		t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
	}

	// Custom TLS without a server name: expect it defaulted to the DSN host.
	name = "localhost"
	tlsCfg.ServerName = ""
	cfg, err = ParseDSN(tst)

	if err != nil {
		t.Error(err.Error())
	} else if cfg.tls.ServerName != name {
		t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
	} else if tlsCfg.ServerName != "" {
		t.Errorf("tlsCfg was mutated ServerName (%s) should be empty parsing DSN (%s).", name, tst)
	}
}
+
// TestDSNTLSConfig checks that tls=true populates cfg.tls and derives its
// ServerName from the DSN address, both with an explicit port and without
// one (default port appended).
func TestDSNTLSConfig(t *testing.T) {
	expectedServerName := "example.com"
	dsn := "tcp(example.com:1234)/?tls=true"

	cfg, err := ParseDSN(dsn)
	if err != nil {
		t.Error(err.Error())
	}
	if cfg.tls == nil {
		t.Error("cfg.tls should not be nil")
	}
	if cfg.tls.ServerName != expectedServerName {
		t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
	}

	dsn = "tcp(example.com)/?tls=true"
	cfg, err = ParseDSN(dsn)
	if err != nil {
		t.Error(err.Error())
	}
	if cfg.tls == nil {
		t.Error("cfg.tls should not be nil")
	}
	if cfg.tls.ServerName != expectedServerName {
		t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
	}
}
+
+func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
+       const configKey = "&%!:"
+       dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
+       name := "foohost"
+       tlsCfg := tls.Config{ServerName: name}
+
+       RegisterTLSConfig(configKey, &tlsCfg)
+       defer DeregisterTLSConfig(configKey)
+
+       cfg, err := ParseDSN(dsn)
+
+       if err != nil {
+               t.Error(err.Error())
+       } else if cfg.tls.ServerName != name {
+               t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
+       }
+}
+
+func TestDSNUnsafeCollation(t *testing.T) {
+       _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
+       if err != errInvalidDSNUnsafeCollation {
+               t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
+       }
+
+       _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
+       if err != nil {
+               t.Errorf("expected %v, got %v", nil, err)
+       }
+
+       _, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
+       if err != nil {
+               t.Errorf("expected %v, got %v", nil, err)
+       }
+
+       _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
+       if err != nil {
+               t.Errorf("expected %v, got %v", nil, err)
+       }
+
+       _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
+       if err != nil {
+               t.Errorf("expected %v, got %v", nil, err)
+       }
+
+       _, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
+       if err != nil {
+               t.Errorf("expected %v, got %v", nil, err)
+       }
+
+       _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
+       if err != nil {
+               t.Errorf("expected %v, got %v", nil, err)
+       }
+}
+
+func TestParamsAreSorted(t *testing.T) {
+       expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
+       cfg := NewConfig()
+       cfg.DBName = "dbname"
+       cfg.InterpolateParams = true
+       cfg.Params = map[string]string{
+               "quux":   "loo",
+               "foobar": "baz",
+       }
+       actual := cfg.FormatDSN()
+       if actual != expected {
+               t.Errorf("generic Config.Params were not sorted: want %#v, got %#v", expected, actual)
+       }
+}
+
// BenchmarkParseDSN measures ParseDSN throughput and allocations across the
// shared testDSNs fixtures.
func BenchmarkParseDSN(b *testing.B) {
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		for _, tst := range testDSNs {
			if _, err := ParseDSN(tst.in); err != nil {
				b.Error(err.Error())
			}
		}
	}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644 (file)
index 0000000..760782f
--- /dev/null
@@ -0,0 +1,65 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "errors"
+       "fmt"
+       "log"
+       "os"
+)
+
// Various errors the driver might return. Can change between driver versions.
//
// NOTE(review): ErrNativePassword's message ends with a trailing period,
// unlike its siblings (Go convention is no trailing punctuation in error
// strings, staticcheck ST1005); left unchanged here since callers may match
// the exact text.
var (
	ErrInvalidConn       = errors.New("invalid connection")
	ErrMalformPkt        = errors.New("malformed packet")
	ErrNoTLS             = errors.New("TLS requested but server does not support TLS")
	ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
	ErrNativePassword    = errors.New("this user requires mysql native password authentication.")
	ErrOldPassword       = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
	ErrUnknownPlugin     = errors.New("this authentication plugin is not supported")
	ErrOldProtocol       = errors.New("MySQL server does not support required protocol 41+")
	ErrPktSync           = errors.New("commands out of sync. You can't run this command now")
	ErrPktSyncMul        = errors.New("commands out of sync. Did you run multiple statements at once?")
	ErrPktTooLarge       = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
	ErrBusyBuffer        = errors.New("busy buffer")

	// errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
	// If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
	// to trigger a resend.
	// See https://github.com/go-sql-driver/mysql/pull/302
	errBadConnNoWrite = errors.New("bad connection")
)
+
// errLog is the package-wide destination for critical error messages.
// It defaults to stderr with a "[mysql] " prefix and is replaceable via
// SetLogger.
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))

// Logger is used to log critical error messages.
type Logger interface {
	Print(v ...interface{})
}
+
+// SetLogger is used to set the logger for critical errors.
+// The initial logger is os.Stderr.
+func SetLogger(logger Logger) error {
+       if logger == nil {
+               return errors.New("logger is nil")
+       }
+       errLog = logger
+       return nil
+}
+
// MySQLError is an error type which represents a single MySQL error
type MySQLError struct {
	Number  uint16
	Message string
}

// Error implements the builtin error interface, rendering the error as
// "Error <number>: <message>".
func (e *MySQLError) Error() string {
	return fmt.Sprintf("Error %d: %s", e.Number, e.Message)
}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors_test.go b/vendor/github.com/go-sql-driver/mysql/errors_test.go
new file mode 100644 (file)
index 0000000..96f9126
--- /dev/null
@@ -0,0 +1,42 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "log"
+       "testing"
+)
+
+func TestErrorsSetLogger(t *testing.T) {
+       previous := errLog
+       defer func() {
+               errLog = previous
+       }()
+
+       // set up logger
+       const expected = "prefix: test\n"
+       buffer := bytes.NewBuffer(make([]byte, 0, 64))
+       logger := log.New(buffer, "prefix: ", 0)
+
+       // print
+       SetLogger(logger)
+       errLog.Print("test")
+
+       // check result
+       if actual := buffer.String(); actual != expected {
+               t.Errorf("expected %q, got %q", expected, actual)
+       }
+}
+
// TestErrorsStrictIgnoreNotes runs against a live server (via the runTests
// harness) with sql_notes disabled and checks that note-level messages —
// e.g. dropping a table that does not exist with IF EXISTS — do not surface
// as errors.
func TestErrorsStrictIgnoreNotes(t *testing.T) {
	runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
		dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
	})
}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644 (file)
index 0000000..e1e2ece
--- /dev/null
@@ -0,0 +1,194 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "database/sql"
+       "reflect"
+)
+
// typeDatabaseName returns the MySQL type name of the column, as it would
// appear in DDL (e.g. "VARCHAR", "BIGINT"). BLOB and string types are
// reported as their text/char variants unless the column's character set is
// the binary collation, in which case the binary variant is returned.
// Returns "" for unknown field types.
func (mf *mysqlField) typeDatabaseName() string {
	switch mf.fieldType {
	case fieldTypeBit:
		return "BIT"
	case fieldTypeBLOB:
		if mf.charSet != collations[binaryCollation] {
			return "TEXT"
		}
		return "BLOB"
	case fieldTypeDate:
		return "DATE"
	case fieldTypeDateTime:
		return "DATETIME"
	case fieldTypeDecimal:
		return "DECIMAL"
	case fieldTypeDouble:
		return "DOUBLE"
	case fieldTypeEnum:
		return "ENUM"
	case fieldTypeFloat:
		return "FLOAT"
	case fieldTypeGeometry:
		return "GEOMETRY"
	case fieldTypeInt24:
		return "MEDIUMINT"
	case fieldTypeJSON:
		return "JSON"
	case fieldTypeLong:
		return "INT"
	case fieldTypeLongBLOB:
		if mf.charSet != collations[binaryCollation] {
			return "LONGTEXT"
		}
		return "LONGBLOB"
	case fieldTypeLongLong:
		return "BIGINT"
	case fieldTypeMediumBLOB:
		if mf.charSet != collations[binaryCollation] {
			return "MEDIUMTEXT"
		}
		return "MEDIUMBLOB"
	case fieldTypeNewDate:
		return "DATE"
	case fieldTypeNewDecimal:
		return "DECIMAL"
	case fieldTypeNULL:
		return "NULL"
	case fieldTypeSet:
		return "SET"
	case fieldTypeShort:
		return "SMALLINT"
	case fieldTypeString:
		if mf.charSet == collations[binaryCollation] {
			return "BINARY"
		}
		return "CHAR"
	case fieldTypeTime:
		return "TIME"
	case fieldTypeTimestamp:
		return "TIMESTAMP"
	case fieldTypeTiny:
		return "TINYINT"
	case fieldTypeTinyBLOB:
		if mf.charSet != collations[binaryCollation] {
			return "TINYTEXT"
		}
		return "TINYBLOB"
	case fieldTypeVarChar:
		if mf.charSet == collations[binaryCollation] {
			return "VARBINARY"
		}
		return "VARCHAR"
	case fieldTypeVarString:
		if mf.charSet == collations[binaryCollation] {
			return "VARBINARY"
		}
		return "VARCHAR"
	case fieldTypeYear:
		return "YEAR"
	default:
		return ""
	}
}
+
// Pre-computed reflect.Types returned by mysqlField.scanType, avoiding a
// reflect.TypeOf call per column per query.
var (
	scanTypeFloat32   = reflect.TypeOf(float32(0))
	scanTypeFloat64   = reflect.TypeOf(float64(0))
	scanTypeInt8      = reflect.TypeOf(int8(0))
	scanTypeInt16     = reflect.TypeOf(int16(0))
	scanTypeInt32     = reflect.TypeOf(int32(0))
	scanTypeInt64     = reflect.TypeOf(int64(0))
	scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
	scanTypeNullInt   = reflect.TypeOf(sql.NullInt64{})
	scanTypeNullTime  = reflect.TypeOf(NullTime{})
	scanTypeUint8     = reflect.TypeOf(uint8(0))
	scanTypeUint16    = reflect.TypeOf(uint16(0))
	scanTypeUint32    = reflect.TypeOf(uint32(0))
	scanTypeUint64    = reflect.TypeOf(uint64(0))
	scanTypeRawBytes  = reflect.TypeOf(sql.RawBytes{})
	scanTypeUnknown   = reflect.TypeOf(new(interface{}))
)
+
// mysqlField describes a single result-set column as reported in the
// column-definition metadata of the MySQL protocol.
type mysqlField struct {
	tableName string
	name      string
	length    uint32
	flags     fieldFlag
	fieldType fieldType
	decimals  byte
	charSet   uint8
}
+
// scanType returns the Go type database/sql should use to scan this column:
// sized (u)int/float types for NOT NULL numeric columns, sql.Null* wrappers
// for nullable numeric columns, sql.RawBytes for string/blob-like types and
// NullTime for date/time types. Unknown field types map to scanTypeUnknown.
func (mf *mysqlField) scanType() reflect.Type {
	switch mf.fieldType {
	case fieldTypeTiny:
		if mf.flags&flagNotNULL != 0 {
			if mf.flags&flagUnsigned != 0 {
				return scanTypeUint8
			}
			return scanTypeInt8
		}
		return scanTypeNullInt

	case fieldTypeShort, fieldTypeYear:
		if mf.flags&flagNotNULL != 0 {
			if mf.flags&flagUnsigned != 0 {
				return scanTypeUint16
			}
			return scanTypeInt16
		}
		return scanTypeNullInt

	case fieldTypeInt24, fieldTypeLong:
		if mf.flags&flagNotNULL != 0 {
			if mf.flags&flagUnsigned != 0 {
				return scanTypeUint32
			}
			return scanTypeInt32
		}
		return scanTypeNullInt

	case fieldTypeLongLong:
		if mf.flags&flagNotNULL != 0 {
			if mf.flags&flagUnsigned != 0 {
				return scanTypeUint64
			}
			return scanTypeInt64
		}
		return scanTypeNullInt

	case fieldTypeFloat:
		if mf.flags&flagNotNULL != 0 {
			return scanTypeFloat32
		}
		return scanTypeNullFloat

	case fieldTypeDouble:
		if mf.flags&flagNotNULL != 0 {
			return scanTypeFloat64
		}
		return scanTypeNullFloat

	case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
		fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
		fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
		fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
		fieldTypeTime:
		return scanTypeRawBytes

	case fieldTypeDate, fieldTypeNewDate,
		fieldTypeTimestamp, fieldTypeDateTime:
		// NullTime is always returned for more consistent behavior as it can
		// handle both cases of parseTime regardless if the field is nullable.
		return scanTypeNullTime

	default:
		return scanTypeUnknown
	}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644 (file)
index 0000000..273cb0b
--- /dev/null
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "fmt"
+       "io"
+       "os"
+       "strings"
+       "sync"
+)
+
// Registries for "LOAD DATA LOCAL INFILE" sources, each guarded by its own
// RWMutex: fileRegister whitelists file paths, readerRegister maps names to
// io.Reader factories. Both are lazily initialized on first registration.
var (
	fileRegister       map[string]bool
	fileRegisterLock   sync.RWMutex
	readerRegister     map[string]func() io.Reader
	readerRegisterLock sync.RWMutex
)
+
+// RegisterLocalFile adds the given file to the file whitelist,
+// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+//  filePath := "/home/gopher/data.csv"
+//  mysql.RegisterLocalFile(filePath)
+//  err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+//  if err != nil {
+//  ...
+//
+func RegisterLocalFile(filePath string) {
+       fileRegisterLock.Lock()
+       // lazy map init
+       if fileRegister == nil {
+               fileRegister = make(map[string]bool)
+       }
+
+       fileRegister[strings.Trim(filePath, `"`)] = true
+       fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the whitelist.
+func DeregisterLocalFile(filePath string) {
+       fileRegisterLock.Lock()
+       delete(fileRegister, strings.Trim(filePath, `"`))
+       fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive a io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
+// If the handler returns a io.ReadCloser Close() is called when the
+// request is finished.
+//
+//  mysql.RegisterReaderHandler("data", func() io.Reader {
+//     var csvReader io.Reader // Some Reader that returns CSV data
+//     ... // Open Reader here
+//     return csvReader
+//  })
+//  err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+//  if err != nil {
+//  ...
+//
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+       readerRegisterLock.Lock()
+       // lazy map init
+       if readerRegister == nil {
+               readerRegister = make(map[string]func() io.Reader)
+       }
+
+       readerRegister[name] = handler
+       readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+       readerRegisterLock.Lock()
+       delete(readerRegister, name)
+       readerRegisterLock.Unlock()
+}
+
// deferredClose closes closer and, when *err is still nil, records the Close
// error in *err. Meant to be used with defer so a Close failure is not
// silently dropped, while never overwriting an earlier, more specific error.
func deferredClose(err *error, closer io.Closer) {
	closeErr := closer.Close()
	if *err == nil {
		*err = closeErr
	}
}
+
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+       var rdr io.Reader
+       var data []byte
+       packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+       if mc.maxWriteSize < packetSize {
+               packetSize = mc.maxWriteSize
+       }
+
+       if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+               // The server might return an an absolute path. See issue #355.
+               name = name[idx+8:]
+
+               readerRegisterLock.RLock()
+               handler, inMap := readerRegister[name]
+               readerRegisterLock.RUnlock()
+
+               if inMap {
+                       rdr = handler()
+                       if rdr != nil {
+                               if cl, ok := rdr.(io.Closer); ok {
+                                       defer deferredClose(&err, cl)
+                               }
+                       } else {
+                               err = fmt.Errorf("Reader '%s' is <nil>", name)
+                       }
+               } else {
+                       err = fmt.Errorf("Reader '%s' is not registered", name)
+               }
+       } else { // File
+               name = strings.Trim(name, `"`)
+               fileRegisterLock.RLock()
+               fr := fileRegister[name]
+               fileRegisterLock.RUnlock()
+               if mc.cfg.AllowAllFiles || fr {
+                       var file *os.File
+                       var fi os.FileInfo
+
+                       if file, err = os.Open(name); err == nil {
+                               defer deferredClose(&err, file)
+
+                               // get file size
+                               if fi, err = file.Stat(); err == nil {
+                                       rdr = file
+                                       if fileSize := int(fi.Size()); fileSize < packetSize {
+                                               packetSize = fileSize
+                                       }
+                               }
+                       }
+               } else {
+                       err = fmt.Errorf("local file '%s' is not registered", name)
+               }
+       }
+
+       // send content packets
+       // if packetSize == 0, the Reader contains no data
+       if err == nil && packetSize > 0 {
+               data := make([]byte, 4+packetSize)
+               var n int
+               for err == nil {
+                       n, err = rdr.Read(data[4:])
+                       if n > 0 {
+                               if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+                                       return ioErr
+                               }
+                       }
+               }
+               if err == io.EOF {
+                       err = nil
+               }
+       }
+
+       // send empty packet (termination)
+       if data == nil {
+               data = make([]byte, 4)
+       }
+       if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+               return ioErr
+       }
+
+       // read OK packet
+       if err == nil {
+               return mc.readResultOK()
+       }
+
+       mc.readPacket()
+       return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644 (file)
index 0000000..170aaa0
--- /dev/null
@@ -0,0 +1,1298 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "crypto/tls"
+       "database/sql/driver"
+       "encoding/binary"
+       "errors"
+       "fmt"
+       "io"
+       "math"
+       "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// readPacket reads one logical MySQL packet and returns its payload,
+// transparently reassembling payloads that the server split into multiple
+// wire packets of maxPacketSize each. On a protocol or I/O error the
+// connection is closed and ErrInvalidConn is returned (or the cancel cause
+// if the context was canceled).
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+       var prevData []byte
+       for {
+               // read packet header: 3-byte length + 1-byte sequence number
+               data, err := mc.buf.readNext(4)
+               if err != nil {
+                       // a context cancellation takes precedence over the raw read error
+                       if cerr := mc.canceled.Value(); cerr != nil {
+                               return nil, cerr
+                       }
+                       errLog.Print(err)
+                       mc.Close()
+                       return nil, ErrInvalidConn
+               }
+
+               // packet length [24 bit], little-endian
+               pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+               // check packet sync [8 bit]: sequence number must match our counter
+               if data[3] != mc.sequence {
+                       if data[3] > mc.sequence {
+                               return nil, ErrPktSyncMul
+                       }
+                       return nil, ErrPktSync
+               }
+               mc.sequence++
+
+               // packets with length 0 terminate a previous packet which is a
+               // multiple of (2^24)−1 bytes long
+               if pktLen == 0 {
+                       // there was no previous packet
+                       if prevData == nil {
+                               errLog.Print(ErrMalformPkt)
+                               mc.Close()
+                               return nil, ErrInvalidConn
+                       }
+
+                       return prevData, nil
+               }
+
+               // read packet body [pktLen bytes]
+               data, err = mc.buf.readNext(pktLen)
+               if err != nil {
+                       if cerr := mc.canceled.Value(); cerr != nil {
+                               return nil, cerr
+                       }
+                       errLog.Print(err)
+                       mc.Close()
+                       return nil, ErrInvalidConn
+               }
+
+               // return data if this was the last packet (shorter than the split size)
+               if pktLen < maxPacketSize {
+                       // zero allocations for non-split packets
+                       if prevData == nil {
+                               return data, nil
+                       }
+
+                       return append(prevData, data...), nil
+               }
+
+               // packet was exactly maxPacketSize: more fragments follow
+               prevData = append(prevData, data...)
+       }
+}
+
+// writePacket sends 'data' as one or more MySQL packets. data must have a
+// 4-byte header prefix which this function fills in (length + sequence);
+// payloads of maxPacketSize or more are split across multiple wire packets.
+// Returns errBadConnNoWrite only when nothing was written yet, so callers
+// higher up can treat the connection as retry-safe.
+func (mc *mysqlConn) writePacket(data []byte) error {
+       pktLen := len(data) - 4
+
+       if pktLen > mc.maxAllowedPacket {
+               return ErrPktTooLarge
+       }
+
+       for {
+               var size int
+               if pktLen >= maxPacketSize {
+                       // 0xffffff marks a split packet; the remainder follows in the next loop
+                       data[0] = 0xff
+                       data[1] = 0xff
+                       data[2] = 0xff
+                       size = maxPacketSize
+               } else {
+                       // 24-bit little-endian payload length
+                       data[0] = byte(pktLen)
+                       data[1] = byte(pktLen >> 8)
+                       data[2] = byte(pktLen >> 16)
+                       size = pktLen
+               }
+               data[3] = mc.sequence
+
+               // Write packet
+               if mc.writeTimeout > 0 {
+                       if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+                               return err
+                       }
+               }
+
+               n, err := mc.netConn.Write(data[:4+size])
+               if err == nil && n == 4+size {
+                       mc.sequence++
+                       if size != maxPacketSize {
+                               return nil
+                       }
+                       // split packet: advance past the written chunk and continue
+                       pktLen -= size
+                       data = data[size:]
+                       continue
+               }
+
+               // Handle error
+               if err == nil { // n != len(data)
+                       mc.cleanup()
+                       errLog.Print(ErrMalformPkt)
+               } else {
+                       if cerr := mc.canceled.Value(); cerr != nil {
+                               return cerr
+                       }
+                       if n == 0 && pktLen == len(data)-4 {
+                               // only for the first loop iteration when nothing was written yet
+                               return errBadConnNoWrite
+                       }
+                       mc.cleanup()
+                       errLog.Print(err)
+               }
+               return ErrInvalidConn
+       }
+}
+
+/******************************************************************************
+*                           Initialization Process                            *
+******************************************************************************/
+
+// readHandshakePacket reads and parses the server's Handshake Initialization
+// Packet, returning the auth seed data (a memory-safe copy) and the name of
+// the server's default auth plugin.
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+       data, err = mc.readPacket()
+       if err != nil {
+               // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+               // in connection initialization we don't risk retrying non-idempotent actions.
+               if err == ErrInvalidConn {
+                       return nil, "", driver.ErrBadConn
+               }
+               return
+       }
+
+       // server may reject the connection outright with an ERR packet
+       if data[0] == iERR {
+               return nil, "", mc.handleErrorPacket(data)
+       }
+
+       // protocol version [1 byte]
+       if data[0] < minProtocolVersion {
+               return nil, "", fmt.Errorf(
+                       "unsupported protocol version %d. Version %d or higher is required",
+                       data[0],
+                       minProtocolVersion,
+               )
+       }
+
+       // server version [null terminated string]
+       // connection id [4 bytes]
+       pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+       // first part of the password cipher [8 bytes]
+       authData := data[pos : pos+8]
+
+       // (filler) always 0x00 [1 byte]
+       pos += 8 + 1
+
+       // capability flags (lower 2 bytes) [2 bytes]
+       mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+       if mc.flags&clientProtocol41 == 0 {
+               return nil, "", ErrOldProtocol
+       }
+       // TLS was requested in the DSN but the server does not advertise SSL support
+       if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
+               return nil, "", ErrNoTLS
+       }
+       pos += 2
+
+       if len(data) > pos {
+               // character set [1 byte]
+               // status flags [2 bytes]
+               // capability flags (upper 2 bytes) [2 bytes]
+               // length of auth-plugin-data [1 byte]
+               // reserved (all [00]) [10 bytes]
+               pos += 1 + 2 + 2 + 1 + 10
+
+               // second part of the password cipher [mininum 13 bytes],
+               // where len=MAX(13, length of auth-plugin-data - 8)
+               //
+               // The web documentation is ambiguous about the length. However,
+               // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+               // the 13th byte is "\0 byte, terminating the second part of
+               // a scramble". So the second part of the password cipher is
+               // a NULL terminated string that's at least 13 bytes with the
+               // last byte being NULL.
+               //
+               // The official Python library uses the fixed length 12
+               // which seems to work but technically could have a hidden bug.
+               authData = append(authData, data[pos:pos+12]...)
+               pos += 13
+
+               // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+               // \NUL otherwise
+               if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+                       plugin = string(data[pos : pos+end])
+               } else {
+                       plugin = string(data[pos:])
+               }
+
+               // make a memory safe copy of the cipher slice
+               // (authData aliases the connection read buffer, which is reused)
+               var b [20]byte
+               copy(b[:], authData)
+               return b[:], plugin, nil
+       }
+
+       // make a memory safe copy of the cipher slice
+       var b [8]byte
+       copy(b[:], authData)
+       return b[:], plugin, nil
+}
+
+// writeHandshakeResponsePacket sends the client's HandshakeResponse with the
+// negotiated capability flags, charset, user, auth response (authResp, with a
+// trailing NUL when addNUL is set), optional database name, and the auth
+// plugin name. If TLS is configured it first sends an SSLRequest packet and
+// upgrades the connection before sending the credentials.
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, plugin string) error {
+       // Adjust client flags based on server support
+       clientFlags := clientProtocol41 |
+               clientSecureConn |
+               clientLongPassword |
+               clientTransactions |
+               clientLocalFiles |
+               clientPluginAuth |
+               clientMultiResults |
+               mc.flags&clientLongFlag
+
+       if mc.cfg.ClientFoundRows {
+               clientFlags |= clientFoundRows
+       }
+
+       // To enable TLS / SSL
+       if mc.cfg.tls != nil {
+               clientFlags |= clientSSL
+       }
+
+       if mc.cfg.MultiStatements {
+               clientFlags |= clientMultiStatements
+       }
+
+       // encode length of the auth plugin data
+       var authRespLEIBuf [9]byte
+       authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(len(authResp)))
+       if len(authRespLEI) > 1 {
+               // if the length can not be written in 1 byte, it must be written as a
+               // length encoded integer
+               clientFlags |= clientPluginAuthLenEncClientData
+       }
+
+       // fixed header fields + user + auth data + plugin name + trailing NUL
+       pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
+       if addNUL {
+               pktLen++
+       }
+
+       // To specify a db name
+       if n := len(mc.cfg.DBName); n > 0 {
+               clientFlags |= clientConnectWithDB
+               pktLen += n + 1
+       }
+
+       // Calculate packet length and get buffer with that size
+       data := mc.buf.takeSmallBuffer(pktLen + 4)
+       if data == nil {
+               // cannot take the buffer. Something must be wrong with the connection
+               errLog.Print(ErrBusyBuffer)
+               return errBadConnNoWrite
+       }
+
+       // ClientFlags [32 bit]
+       data[4] = byte(clientFlags)
+       data[5] = byte(clientFlags >> 8)
+       data[6] = byte(clientFlags >> 16)
+       data[7] = byte(clientFlags >> 24)
+
+       // MaxPacketSize [32 bit] (none)
+       data[8] = 0x00
+       data[9] = 0x00
+       data[10] = 0x00
+       data[11] = 0x00
+
+       // Charset [1 byte]
+       var found bool
+       data[12], found = collations[mc.cfg.Collation]
+       if !found {
+               // Note possibility for false negatives:
+               // could be triggered  although the collation is valid if the
+               // collations map does not contain entries the server supports.
+               return errors.New("unknown collation")
+       }
+
+       // SSL Connection Request Packet
+       // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+       if mc.cfg.tls != nil {
+               // Send TLS / SSL request packet (just the flags/charset/filler prefix)
+               if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+                       return err
+               }
+
+               // Switch to TLS; all further traffic (including the credentials
+               // below) goes over the encrypted connection
+               tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
+               if err := tlsConn.Handshake(); err != nil {
+                       return err
+               }
+               mc.netConn = tlsConn
+               mc.buf.nc = tlsConn
+       }
+
+       // Filler [23 bytes] (all 0x00)
+       pos := 13
+       for ; pos < 13+23; pos++ {
+               data[pos] = 0
+       }
+
+       // User [null terminated string]
+       if len(mc.cfg.User) > 0 {
+               pos += copy(data[pos:], mc.cfg.User)
+       }
+       data[pos] = 0x00
+       pos++
+
+       // Auth Data [length encoded integer]
+       pos += copy(data[pos:], authRespLEI)
+       pos += copy(data[pos:], authResp)
+       if addNUL {
+               data[pos] = 0x00
+               pos++
+       }
+
+       // Databasename [null terminated string]
+       if len(mc.cfg.DBName) > 0 {
+               pos += copy(data[pos:], mc.cfg.DBName)
+               data[pos] = 0x00
+               pos++
+       }
+
+       // auth plugin name [null terminated string]
+       pos += copy(data[pos:], plugin)
+       data[pos] = 0x00
+
+       // Send Auth packet
+       return mc.writePacket(data)
+}
+
+// writeAuthSwitchPacket sends authData as an AuthSwitchResponse packet,
+// appending a terminating NUL byte when addNUL is set.
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte, addNUL bool) error {
+       pktLen := 4 + len(authData)
+       if addNUL {
+               pktLen++
+       }
+       data := mc.buf.takeSmallBuffer(pktLen)
+       if data == nil {
+               // cannot take the buffer. Something must be wrong with the connection
+               errLog.Print(ErrBusyBuffer)
+               return errBadConnNoWrite
+       }
+
+       // Add the auth data [EOF]
+       copy(data[4:], authData)
+       if addNUL {
+               data[pktLen-1] = 0x00
+       }
+
+       return mc.writePacket(data)
+}
+
+/******************************************************************************
+*                             Command Packets                                 *
+******************************************************************************/
+
+// writeCommandPacket sends a bare command packet (command byte only, no
+// arguments), e.g. COM_PING or COM_QUIT.
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+       // Reset Packet Sequence: every command starts a new sequence
+       mc.sequence = 0
+
+       data := mc.buf.takeSmallBuffer(4 + 1)
+       if data == nil {
+               // cannot take the buffer. Something must be wrong with the connection
+               errLog.Print(ErrBusyBuffer)
+               return errBadConnNoWrite
+       }
+
+       // Add command byte
+       data[4] = command
+
+       // Send CMD packet
+       return mc.writePacket(data)
+}
+
+// writeCommandPacketStr sends a command packet with a string argument,
+// e.g. COM_QUERY with the SQL text.
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+       // Reset Packet Sequence: every command starts a new sequence
+       mc.sequence = 0
+
+       pktLen := 1 + len(arg)
+       data := mc.buf.takeBuffer(pktLen + 4)
+       if data == nil {
+               // cannot take the buffer. Something must be wrong with the connection
+               errLog.Print(ErrBusyBuffer)
+               return errBadConnNoWrite
+       }
+
+       // Add command byte
+       data[4] = command
+
+       // Add arg
+       copy(data[5:], arg)
+
+       // Send CMD packet
+       return mc.writePacket(data)
+}
+
+// writeCommandPacketUint32 sends a command packet with a single 32-bit
+// little-endian argument, e.g. COM_STMT_CLOSE with a statement id.
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+       // Reset Packet Sequence: every command starts a new sequence
+       mc.sequence = 0
+
+       data := mc.buf.takeSmallBuffer(4 + 1 + 4)
+       if data == nil {
+               // cannot take the buffer. Something must be wrong with the connection
+               errLog.Print(ErrBusyBuffer)
+               return errBadConnNoWrite
+       }
+
+       // Add command byte
+       data[4] = command
+
+       // Add arg [32 bit]
+       data[5] = byte(arg)
+       data[6] = byte(arg >> 8)
+       data[7] = byte(arg >> 16)
+       data[8] = byte(arg >> 24)
+
+       // Send CMD packet
+       return mc.writePacket(data)
+}
+
+/******************************************************************************
+*                              Result Packets                                 *
+******************************************************************************/
+
+// readAuthResult reads the server's reply during authentication and returns
+// any additional auth data plus the plugin name when the server requests an
+// auth method switch. An OK packet yields (nil, "", nil); an ERR packet is
+// converted to an error.
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
+       data, err := mc.readPacket()
+       if err != nil {
+               return nil, "", err
+       }
+
+       // packet indicator
+       switch data[0] {
+
+       case iOK:
+               // authentication succeeded
+               return nil, "", mc.handleOkPacket(data)
+
+       case iAuthMoreData:
+               // same plugin continues with extra data
+               return data[1:], "", err
+
+       case iEOF:
+               if len(data) < 1 {
+                       // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+                       return nil, "mysql_old_password", nil
+               }
+               // AuthSwitchRequest: plugin name [NUL-terminated] followed by its auth data
+               pluginEndIndex := bytes.IndexByte(data, 0x00)
+               if pluginEndIndex < 0 {
+                       return nil, "", ErrMalformPkt
+               }
+               plugin := string(data[1:pluginEndIndex])
+               authData := data[pluginEndIndex+1:]
+               return authData, plugin, nil
+
+       default: // Error otherwise
+               return nil, "", mc.handleErrorPacket(data)
+       }
+}
+
+// readResultOK reads the next packet and returns nil only if it is an OK
+// packet; any other packet is treated as an error packet.
+func (mc *mysqlConn) readResultOK() error {
+       data, err := mc.readPacket()
+       if err != nil {
+               return err
+       }
+
+       if data[0] == iOK {
+               return mc.handleOkPacket(data)
+       }
+       return mc.handleErrorPacket(data)
+}
+
+// readResultSetHeaderPacket reads the first packet of a query response and
+// returns the column count of the result set (0 for OK/ERR/LOCAL INFILE
+// responses, which are fully handled here).
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+       data, err := mc.readPacket()
+       if err == nil {
+               switch data[0] {
+
+               case iOK:
+                       return 0, mc.handleOkPacket(data)
+
+               case iERR:
+                       return 0, mc.handleErrorPacket(data)
+
+               case iLocalInFile:
+                       // server asks us to upload a local file; remainder is the filename
+                       return 0, mc.handleInFileRequest(string(data[1:]))
+               }
+
+               // column count [length encoded integer]; must consume the whole packet
+               num, _, n := readLengthEncodedInteger(data)
+               if n-len(data) == 0 {
+                       return int(num), nil
+               }
+
+               return 0, ErrMalformPkt
+       }
+       return 0, err
+}
+
+// handleErrorPacket parses an ERR packet into a *MySQLError. With
+// RejectReadOnly set, read-only errors (1792/1290, seen e.g. during Aurora
+// failover) close the connection and return driver.ErrBadConn instead.
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+       if data[0] != iERR {
+               return ErrMalformPkt
+       }
+
+       // 0xff [1 byte]
+
+       // Error Number [16 bit uint]
+       errno := binary.LittleEndian.Uint16(data[1:3])
+
+       // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+       // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+       if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+               // Oops; we are connected to a read-only connection, and won't be able
+               // to issue any write statements. Since RejectReadOnly is configured,
+               // we throw away this connection hoping this one would have write
+               // permission. This is specifically for a possible race condition
+               // during failover (e.g. on AWS Aurora). See README.md for more.
+               //
+               // We explicitly close the connection before returning
+               // driver.ErrBadConn to ensure that `database/sql` purges this
+               // connection and initiates a new one for next statement next time.
+               mc.Close()
+               return driver.ErrBadConn
+       }
+
+       pos := 3
+
+       // SQL State [optional: # + 5bytes string]
+       if data[3] == 0x23 {
+               //sqlstate := string(data[4 : 4+5])
+               pos = 9
+       }
+
+       // Error Message [string]
+       return &MySQLError{
+               Number:  errno,
+               Message: string(data[pos:]),
+       }
+}
+
+// readStatus decodes a 16-bit little-endian server status field from b.
+func readStatus(b []byte) statusFlag {
+       return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// handleOkPacket parses an OK packet and records affected rows, last insert
+// id and the server status flags on the connection. Always returns nil.
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+       var n, m int
+
+       // 0x00 [1 byte]
+
+       // Affected rows [Length Coded Binary]
+       mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+       // Insert id [Length Coded Binary]
+       mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+       // server_status [2 bytes]
+       mc.status = readStatus(data[1+n+m : 1+n+m+2])
+       if mc.status&statusMoreResultsExists != 0 {
+               // further result sets follow this one
+               return nil
+       }
+
+       // warning count [2 bytes] — intentionally ignored
+
+       return nil
+}
+
+// readColumns reads 'count' column definition packets (terminated by an EOF
+// packet) and returns the parsed field metadata. Only the fields this driver
+// uses (name, optional table alias, charset, length, type, flags, decimals)
+// are retained; the rest of each packet is skipped.
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+       columns := make([]mysqlField, count)
+
+       for i := 0; ; i++ {
+               data, err := mc.readPacket()
+               if err != nil {
+                       return nil, err
+               }
+
+               // EOF Packet terminates the column definitions
+               if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+                       if i == count {
+                               return columns, nil
+                       }
+                       return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+               }
+
+               // Catalog
+               pos, err := skipLengthEncodedString(data)
+               if err != nil {
+                       return nil, err
+               }
+
+               // Database [len coded string]
+               n, err := skipLengthEncodedString(data[pos:])
+               if err != nil {
+                       return nil, err
+               }
+               pos += n
+
+               // Table [len coded string] — kept only when the DSN enabled ColumnsWithAlias
+               if mc.cfg.ColumnsWithAlias {
+                       tableName, _, n, err := readLengthEncodedString(data[pos:])
+                       if err != nil {
+                               return nil, err
+                       }
+                       pos += n
+                       columns[i].tableName = string(tableName)
+               } else {
+                       n, err = skipLengthEncodedString(data[pos:])
+                       if err != nil {
+                               return nil, err
+                       }
+                       pos += n
+               }
+
+               // Original table [len coded string]
+               n, err = skipLengthEncodedString(data[pos:])
+               if err != nil {
+                       return nil, err
+               }
+               pos += n
+
+               // Name [len coded string]
+               name, _, n, err := readLengthEncodedString(data[pos:])
+               if err != nil {
+                       return nil, err
+               }
+               columns[i].name = string(name)
+               pos += n
+
+               // Original name [len coded string]
+               n, err = skipLengthEncodedString(data[pos:])
+               if err != nil {
+                       return nil, err
+               }
+               pos += n
+
+               // Filler [uint8]
+               pos++
+
+               // Charset [charset, collation uint8]
+               columns[i].charSet = data[pos]
+               pos += 2
+
+               // Length [uint32]
+               columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+               pos += 4
+
+               // Field type [uint8]
+               columns[i].fieldType = fieldType(data[pos])
+               pos++
+
+               // Flags [uint16]
+               columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+               pos += 2
+
+               // Decimals [uint8]
+               columns[i].decimals = data[pos]
+               //pos++
+
+               // Default value [len coded binary] — not parsed by this driver
+               //if pos < len(data) {
+               //      defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+               //}
+       }
+}
+
+// readRow reads one text-protocol result row into dest. Values arrive as
+// length-encoded strings; NULL columns become nil, and date/time columns are
+// parsed into time.Time when the connection has parseTime enabled. Returns
+// io.EOF at the terminating EOF packet.
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+       mc := rows.mc
+
+       if rows.rs.done {
+               return io.EOF
+       }
+
+       data, err := mc.readPacket()
+       if err != nil {
+               return err
+       }
+
+       // EOF Packet marks the end of this result set
+       if data[0] == iEOF && len(data) == 5 {
+               // server_status [2 bytes]
+               rows.mc.status = readStatus(data[3:])
+               rows.rs.done = true
+               if !rows.HasNextResultSet() {
+                       // detach from the connection so it can be reused
+                       rows.mc = nil
+               }
+               return io.EOF
+       }
+       if data[0] == iERR {
+               rows.mc = nil
+               return mc.handleErrorPacket(data)
+       }
+
+       // RowSet Packet
+       var n int
+       var isNull bool
+       pos := 0
+
+       for i := range dest {
+               // Read bytes and convert to string
+               dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+               pos += n
+               if err == nil {
+                       if !isNull {
+                               if !mc.parseTime {
+                                       continue
+                               } else {
+                                       // parseTime: convert temporal columns to time.Time
+                                       switch rows.rs.columns[i].fieldType {
+                                       case fieldTypeTimestamp, fieldTypeDateTime,
+                                               fieldTypeDate, fieldTypeNewDate:
+                                               dest[i], err = parseDateTime(
+                                                       string(dest[i].([]byte)),
+                                                       mc.cfg.Loc,
+                                               )
+                                               if err == nil {
+                                                       continue
+                                               }
+                                       default:
+                                               continue
+                                       }
+                               }
+
+                       } else {
+                               dest[i] = nil
+                               continue
+                       }
+               }
+               return err // err != nil
+       }
+
+       return nil
+}
+
+// readUntilEOF drains packets until an EOF packet (updating the server
+// status flags from it) or converts an ERR packet into an error.
+// NOTE(review): despite the original upstream comment, no packet count is
+// returned — only an error value.
+func (mc *mysqlConn) readUntilEOF() error {
+       for {
+               data, err := mc.readPacket()
+               if err != nil {
+                       return err
+               }
+
+               switch data[0] {
+               case iERR:
+                       return mc.handleErrorPacket(data)
+               case iEOF:
+                       if len(data) == 5 {
+                               // EOF packet carries server status in bytes 3-4
+                               mc.status = readStatus(data[3:])
+                       }
+                       return nil
+               }
+       }
+}
+
+/******************************************************************************
+*                           Prepared Statements                               *
+******************************************************************************/
+
+// readPrepareResultPacket reads the COM_STMT_PREPARE response, storing the
+// statement id and parameter count on the statement and returning the
+// result-set column count.
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+       data, err := stmt.mc.readPacket()
+       if err == nil {
+               // packet indicator [1 byte]
+               if data[0] != iOK {
+                       return 0, stmt.mc.handleErrorPacket(data)
+               }
+
+               // statement id [4 bytes]
+               stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+               // Column count [16 bit uint]
+               columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+               // Param count [16 bit uint]
+               stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+               // Reserved [8 bit]
+
+               // Warning count [16 bit uint]
+
+               return columnCount, nil
+       }
+       return 0, err
+}
+
+// writeCommandLongData streams a large parameter value (arg) for parameter
+// paramID of the prepared statement using COM_STMT_SEND_LONG_DATA, splitting
+// it into chunks that fit within maxAllowedPacket.
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+       maxLen := stmt.mc.maxAllowedPacket - 1
+       pktLen := maxLen
+
+       // After the header (bytes 0-3) follows before the data:
+       // 1 byte command
+       // 4 bytes stmtID
+       // 2 bytes paramID
+       const dataOffset = 1 + 4 + 2
+
+       // Cannot use the write buffer since
+       // a) the buffer is too small
+       // b) it is in use
+       data := make([]byte, 4+1+4+2+len(arg))
+
+       copy(data[4+dataOffset:], arg)
+
+       // each iteration re-stamps the command header in front of the next chunk
+       for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+               // last chunk: shrink the packet to the remaining data
+               if dataOffset+argLen < maxLen {
+                       pktLen = dataOffset + argLen
+               }
+
+               stmt.mc.sequence = 0
+               // Add command byte [1 byte]
+               data[4] = comStmtSendLongData
+
+               // Add stmtID [32 bit]
+               data[5] = byte(stmt.id)
+               data[6] = byte(stmt.id >> 8)
+               data[7] = byte(stmt.id >> 16)
+               data[8] = byte(stmt.id >> 24)
+
+               // Add paramID [16 bit]
+               data[9] = byte(paramID)
+               data[10] = byte(paramID >> 8)
+
+               // Send CMD packet
+               err := stmt.mc.writePacket(data[:4+pktLen])
+               if err == nil {
+                       // slide the window; the next chunk's header overwrites sent data
+                       data = data[pktLen-dataOffset:]
+                       continue
+               }
+               return err
+
+       }
+
+       // Reset Packet Sequence
+       stmt.mc.sequence = 0
+       return nil
+}
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+       if len(args) != stmt.paramCount {
+               return fmt.Errorf(
+                       "argument count mismatch (got: %d; has: %d)",
+                       len(args),
+                       stmt.paramCount,
+               )
+       }
+
+       // packet header (4) + command (1) + statement_id (4) + flags (1) + iteration_count (4)
+       const minPktLen = 4 + 1 + 4 + 1 + 4
+       mc := stmt.mc
+
+       // Determine threshold dynamically to avoid packet size shortage.
+       // Arguments at least this long are sent out-of-band via COM_STMT_SEND_LONG_DATA
+       // (writeCommandLongData) instead of being inlined into this packet.
+       longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+       if longDataSize < 64 {
+               longDataSize = 64
+       }
+
+       // Reset packet-sequence
+       mc.sequence = 0
+
+       var data []byte
+
+       if len(args) == 0 {
+               data = mc.buf.takeBuffer(minPktLen)
+       } else {
+               data = mc.buf.takeCompleteBuffer()
+       }
+       if data == nil {
+               // cannot take the buffer. Something must be wrong with the connection
+               errLog.Print(ErrBusyBuffer)
+               return errBadConnNoWrite
+       }
+
+       // command [1 byte]
+       data[4] = comStmtExecute
+
+       // statement_id [4 bytes]
+       data[5] = byte(stmt.id)
+       data[6] = byte(stmt.id >> 8)
+       data[7] = byte(stmt.id >> 16)
+       data[8] = byte(stmt.id >> 24)
+
+       // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+       data[9] = 0x00
+
+       // iteration_count (uint32(1)) [4 bytes]
+       data[10] = 0x01
+       data[11] = 0x00
+       data[12] = 0x00
+       data[13] = 0x00
+
+       if len(args) > 0 {
+               pos := minPktLen
+
+               var nullMask []byte
+               if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) {
+                       // buffer has to be extended but we don't know by how much so
+                       // we depend on append after all data with known sizes fit.
+                       // We stop at that because we deal with a lot of columns here
+                       // which makes the required allocation size hard to guess.
+                       tmp := make([]byte, pos+maskLen+typesLen)
+                       copy(tmp[:pos], data[:pos])
+                       data = tmp
+                       nullMask = data[pos : pos+maskLen]
+                       pos += maskLen
+               } else {
+                       nullMask = data[pos : pos+maskLen]
+                       // the buffer is reused, so the null mask must be zeroed explicitly
+                       for i := 0; i < maskLen; i++ {
+                               nullMask[i] = 0
+                       }
+                       pos += maskLen
+               }
+
+               // newParameterBoundFlag 1 [1 byte]
+               data[pos] = 0x01
+               pos++
+
+               // type of each parameter [len(args)*2 bytes]
+               paramTypes := data[pos:]
+               pos += len(args) * 2
+
+               // value of each parameter [n bytes]
+               // paramValues aliases data; valuesCap is compared afterwards to detect
+               // whether append reallocated it away from the shared backing array.
+               paramValues := data[pos:pos]
+               valuesCap := cap(paramValues)
+
+               for i, arg := range args {
+                       // build NULL-bitmap
+                       if arg == nil {
+                               nullMask[i/8] |= 1 << (uint(i) & 7)
+                               paramTypes[i+i] = byte(fieldTypeNULL)
+                               paramTypes[i+i+1] = 0x00
+                               continue
+                       }
+
+                       // cache types and values
+                       switch v := arg.(type) {
+                       case int64:
+                               paramTypes[i+i] = byte(fieldTypeLongLong)
+                               paramTypes[i+i+1] = 0x00
+
+                               if cap(paramValues)-len(paramValues)-8 >= 0 {
+                                       paramValues = paramValues[:len(paramValues)+8]
+                                       binary.LittleEndian.PutUint64(
+                                               paramValues[len(paramValues)-8:],
+                                               uint64(v),
+                                       )
+                               } else {
+                                       paramValues = append(paramValues,
+                                               uint64ToBytes(uint64(v))...,
+                                       )
+                               }
+
+                       case float64:
+                               paramTypes[i+i] = byte(fieldTypeDouble)
+                               paramTypes[i+i+1] = 0x00
+
+                               if cap(paramValues)-len(paramValues)-8 >= 0 {
+                                       paramValues = paramValues[:len(paramValues)+8]
+                                       binary.LittleEndian.PutUint64(
+                                               paramValues[len(paramValues)-8:],
+                                               math.Float64bits(v),
+                                       )
+                               } else {
+                                       paramValues = append(paramValues,
+                                               uint64ToBytes(math.Float64bits(v))...,
+                                       )
+                               }
+
+                       case bool:
+                               paramTypes[i+i] = byte(fieldTypeTiny)
+                               paramTypes[i+i+1] = 0x00
+
+                               if v {
+                                       paramValues = append(paramValues, 0x01)
+                               } else {
+                                       paramValues = append(paramValues, 0x00)
+                               }
+
+                       case []byte:
+                               // Common case (non-nil value) first
+                               if v != nil {
+                                       paramTypes[i+i] = byte(fieldTypeString)
+                                       paramTypes[i+i+1] = 0x00
+
+                                       if len(v) < longDataSize {
+                                               paramValues = appendLengthEncodedInteger(paramValues,
+                                                       uint64(len(v)),
+                                               )
+                                               paramValues = append(paramValues, v...)
+                                       } else {
+                                               // too large to inline: stream it separately
+                                               if err := stmt.writeCommandLongData(i, v); err != nil {
+                                                       return err
+                                               }
+                                       }
+                                       continue
+                               }
+
+                               // Handle []byte(nil) as a NULL value
+                               nullMask[i/8] |= 1 << (uint(i) & 7)
+                               paramTypes[i+i] = byte(fieldTypeNULL)
+                               paramTypes[i+i+1] = 0x00
+
+                       case string:
+                               paramTypes[i+i] = byte(fieldTypeString)
+                               paramTypes[i+i+1] = 0x00
+
+                               if len(v) < longDataSize {
+                                       paramValues = appendLengthEncodedInteger(paramValues,
+                                               uint64(len(v)),
+                                       )
+                                       paramValues = append(paramValues, v...)
+                               } else {
+                                       if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+                                               return err
+                                       }
+                               }
+
+                       case time.Time:
+                               paramTypes[i+i] = byte(fieldTypeString)
+                               paramTypes[i+i+1] = 0x00
+
+                               var a [64]byte
+                               var b = a[:0]
+
+                               if v.IsZero() {
+                                       b = append(b, "0000-00-00"...)
+                               } else {
+                                       b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
+                               }
+
+                               paramValues = appendLengthEncodedInteger(paramValues,
+                                       uint64(len(b)),
+                               )
+                               paramValues = append(paramValues, b...)
+
+                       default:
+                               return fmt.Errorf("cannot convert type: %T", arg)
+                       }
+               }
+
+               // Check if param values exceeded the available buffer
+               // In that case we must build the data packet with the new values buffer
+               if valuesCap != cap(paramValues) {
+                       data = append(data[:pos], paramValues...)
+                       mc.buf.buf = data
+               }
+
+               pos += len(paramValues)
+               data = data[:pos]
+       }
+
+       return mc.writePacket(data)
+}
+
+// discardResults drains any remaining result sets from the wire so the
+// connection is in a clean state for the next command.
+func (mc *mysqlConn) discardResults() error {
+       for mc.status&statusMoreResultsExists != 0 {
+               resLen, err := mc.readResultSetHeaderPacket()
+               if err != nil {
+                       return err
+               }
+               if resLen > 0 {
+                       // columns
+                       if err := mc.readUntilEOF(); err != nil {
+                               return err
+                       }
+                       // rows
+                       if err := mc.readUntilEOF(); err != nil {
+                               return err
+                       }
+               }
+       }
+       return nil
+}
+
+// readRow decodes one binary-protocol result row into dest.
+// It returns io.EOF at the end of the result set.
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+       data, err := rows.mc.readPacket()
+       if err != nil {
+               return err
+       }
+
+       // packet indicator [1 byte]
+       if data[0] != iOK {
+               // EOF Packet
+               if data[0] == iEOF && len(data) == 5 {
+                       rows.mc.status = readStatus(data[3:])
+                       rows.rs.done = true
+                       if !rows.HasNextResultSet() {
+                               rows.mc = nil
+                       }
+                       return io.EOF
+               }
+               // detach the connection before handling the error packet
+               mc := rows.mc
+               rows.mc = nil
+
+               // Error otherwise
+               return mc.handleErrorPacket(data)
+       }
+
+       // NULL-bitmap,  [(column-count + 7 + 2) / 8 bytes]
+       // (the bitmap has a bit offset of 2; the first two bits are unused,
+       // hence the (i+2) indexing below)
+       pos := 1 + (len(dest)+7+2)>>3
+       nullMask := data[1:pos]
+
+       for i := range dest {
+               // Field is NULL
+               // (byte >> bit-pos) % 2 == 1
+               if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+                       dest[i] = nil
+                       continue
+               }
+
+               // Convert to byte-coded string
+               switch rows.rs.columns[i].fieldType {
+               case fieldTypeNULL:
+                       dest[i] = nil
+                       continue
+
+               // Numeric Types
+               case fieldTypeTiny:
+                       if rows.rs.columns[i].flags&flagUnsigned != 0 {
+                               dest[i] = int64(data[pos])
+                       } else {
+                               dest[i] = int64(int8(data[pos]))
+                       }
+                       pos++
+                       continue
+
+               case fieldTypeShort, fieldTypeYear:
+                       if rows.rs.columns[i].flags&flagUnsigned != 0 {
+                               dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+                       } else {
+                               dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+                       }
+                       pos += 2
+                       continue
+
+               case fieldTypeInt24, fieldTypeLong:
+                       if rows.rs.columns[i].flags&flagUnsigned != 0 {
+                               dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+                       } else {
+                               dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+                       }
+                       pos += 4
+                       continue
+
+               case fieldTypeLongLong:
+                       if rows.rs.columns[i].flags&flagUnsigned != 0 {
+                               val := binary.LittleEndian.Uint64(data[pos : pos+8])
+                               if val > math.MaxInt64 {
+                                       // unsigned value does not fit int64: fall back to string
+                                       dest[i] = uint64ToString(val)
+                               } else {
+                                       dest[i] = int64(val)
+                               }
+                       } else {
+                               dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+                       }
+                       pos += 8
+                       continue
+
+               case fieldTypeFloat:
+                       dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+                       pos += 4
+                       continue
+
+               case fieldTypeDouble:
+                       dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+                       pos += 8
+                       continue
+
+               // Length coded Binary Strings
+               case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+                       fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+                       fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+                       fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+                       var isNull bool
+                       var n int
+                       dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+                       pos += n
+                       if err == nil {
+                               if !isNull {
+                                       continue
+                               } else {
+                                       dest[i] = nil
+                                       continue
+                               }
+                       }
+                       return err
+
+               case
+                       fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+                       fieldTypeTime,                         // Time [-][H]HH:MM:SS[.fractal]
+                       fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
+                       num, isNull, n := readLengthEncodedInteger(data[pos:])
+                       pos += n
+
+                       switch {
+                       case isNull:
+                               dest[i] = nil
+                               continue
+                       case rows.rs.columns[i].fieldType == fieldTypeTime:
+                               // database/sql does not support an equivalent to TIME, return a string
+                               var dstlen uint8
+                               switch decimals := rows.rs.columns[i].decimals; decimals {
+                               case 0x00, 0x1f:
+                                       dstlen = 8
+                               case 1, 2, 3, 4, 5, 6:
+                                       dstlen = 8 + 1 + decimals
+                               default:
+                                       return fmt.Errorf(
+                                               "protocol error, illegal decimals value %d",
+                                               rows.rs.columns[i].decimals,
+                                       )
+                               }
+                               dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
+                       case rows.mc.parseTime:
+                               dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+                       default:
+                               var dstlen uint8
+                               if rows.rs.columns[i].fieldType == fieldTypeDate {
+                                       dstlen = 10
+                               } else {
+                                       switch decimals := rows.rs.columns[i].decimals; decimals {
+                                       case 0x00, 0x1f:
+                                               dstlen = 19
+                                       case 1, 2, 3, 4, 5, 6:
+                                               dstlen = 19 + 1 + decimals
+                                       default:
+                                               return fmt.Errorf(
+                                                       "protocol error, illegal decimals value %d",
+                                                       rows.rs.columns[i].decimals,
+                                               )
+                                       }
+                               }
+                               dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
+                       }
+
+                       if err == nil {
+                               pos += int(num)
+                               continue
+                       } else {
+                               return err
+                       }
+
+               // Please report if this happens!
+               default:
+                       return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+               }
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/go-sql-driver/mysql/packets_test.go
new file mode 100644 (file)
index 0000000..b61e4db
--- /dev/null
@@ -0,0 +1,336 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "errors"
+       "net"
+       "testing"
+       "time"
+)
+
+// sentinel errors returned by mockConn once its configured limits are hit
+var (
+       errConnClosed        = errors.New("connection is closed")
+       errConnTooManyReads  = errors.New("too many reads")
+       errConnTooManyWrites = errors.New("too many writes")
+)
+
+// struct to mock a net.Conn for testing purposes
+type mockConn struct {
+       laddr         net.Addr
+       raddr         net.Addr
+       data          []byte   // bytes served by Read
+       written       []byte   // bytes accumulated by Write
+       queuedReplies [][]byte // one reply moved into data per non-empty Write
+       closed        bool
+       read          int // total bytes read so far
+       reads         int // number of Read calls
+       writes        int // number of Write calls
+       maxReads      int // 0 means unlimited
+       maxWrites     int // 0 means unlimited
+}
+
+// Read serves bytes from m.data, failing after maxReads calls or once closed.
+func (m *mockConn) Read(b []byte) (n int, err error) {
+       if m.closed {
+               return 0, errConnClosed
+       }
+
+       m.reads++
+       if m.maxReads > 0 && m.reads > m.maxReads {
+               return 0, errConnTooManyReads
+       }
+
+       n = copy(b, m.data)
+       m.read += n
+       m.data = m.data[n:]
+       return
+}
+
+// Write records b into m.written and, if replies are queued, arms the next
+// one as the data for subsequent Reads.
+func (m *mockConn) Write(b []byte) (n int, err error) {
+       if m.closed {
+               return 0, errConnClosed
+       }
+
+       m.writes++
+       if m.maxWrites > 0 && m.writes > m.maxWrites {
+               return 0, errConnTooManyWrites
+       }
+
+       n = len(b)
+       m.written = append(m.written, b...)
+
+       if n > 0 && len(m.queuedReplies) > 0 {
+               m.data = m.queuedReplies[0]
+               m.queuedReplies = m.queuedReplies[1:]
+       }
+       return
+}
+func (m *mockConn) Close() error {
+       m.closed = true
+       return nil
+}
+func (m *mockConn) LocalAddr() net.Addr {
+       return m.laddr
+}
+func (m *mockConn) RemoteAddr() net.Addr {
+       return m.raddr
+}
+
+// deadline setters are no-ops for the mock
+func (m *mockConn) SetDeadline(t time.Time) error {
+       return nil
+}
+func (m *mockConn) SetReadDeadline(t time.Time) error {
+       return nil
+}
+func (m *mockConn) SetWriteDeadline(t time.Time) error {
+       return nil
+}
+
+// make sure mockConn implements the net.Conn interface
+var _ net.Conn = new(mockConn)
+
+// newRWMockConn returns a fresh mockConn together with a mysqlConn wired to
+// it, starting at the given packet sequence number.
+func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
+       conn := new(mockConn)
+       mc := &mysqlConn{
+               buf:              newBuffer(conn),
+               cfg:              NewConfig(),
+               netConn:          conn,
+               closech:          make(chan struct{}),
+               maxAllowedPacket: defaultMaxAllowedPacket,
+               sequence:         sequence,
+       }
+       return conn, mc
+}
+
+// TestReadPacketSingleByte checks that a minimal 1-byte-payload packet
+// (4-byte header + 0xff) is read back correctly.
+func TestReadPacketSingleByte(t *testing.T) {
+       conn := new(mockConn)
+       mc := &mysqlConn{
+               buf: newBuffer(conn),
+       }
+
+       conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
+       conn.maxReads = 1
+       packet, err := mc.readPacket()
+       if err != nil {
+               t.Fatal(err)
+       }
+       if len(packet) != 1 {
+               t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet))
+       }
+       if packet[0] != 0xff {
+               t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
+       }
+}
+
+// TestReadPacketWrongSequenceID checks that out-of-sync packet sequence ids
+// yield ErrPktSync (id too low) or ErrPktSyncMul (id too high).
+func TestReadPacketWrongSequenceID(t *testing.T) {
+       conn := new(mockConn)
+       mc := &mysqlConn{
+               buf: newBuffer(conn),
+       }
+
+       // too low sequence id
+       conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
+       conn.maxReads = 1
+       mc.sequence = 1
+       _, err := mc.readPacket()
+       if err != ErrPktSync {
+               t.Errorf("expected ErrPktSync, got %v", err)
+       }
+
+       // reset
+       conn.reads = 0
+       mc.sequence = 0
+       mc.buf = newBuffer(conn)
+
+       // too high sequence id
+       conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
+       _, err = mc.readPacket()
+       if err != ErrPktSyncMul {
+               t.Errorf("expected ErrPktSyncMul, got %v", err)
+       }
+}
+
+// TestReadPacketSplit checks reassembly of payloads split across multiple
+// wire packets: exactly maxPacketSize, a multiple of it, and a non-multiple.
+func TestReadPacketSplit(t *testing.T) {
+       conn := new(mockConn)
+       mc := &mysqlConn{
+               buf: newBuffer(conn),
+       }
+
+       data := make([]byte, maxPacketSize*2+4*3)
+       const pkt2ofs = maxPacketSize + 4
+       const pkt3ofs = 2 * (maxPacketSize + 4)
+
+       // case 1: payload has length maxPacketSize
+       data = data[:pkt2ofs+4]
+
+       // 1st packet has maxPacketSize length and sequence id 0
+       // ff ff ff 00 ...
+       data[0] = 0xff
+       data[1] = 0xff
+       data[2] = 0xff
+
+       // mark the payload start and end of 1st packet so that we can check if the
+       // content was correctly appended
+       data[4] = 0x11
+       data[maxPacketSize+3] = 0x22
+
+       // 2nd packet has payload length 0 and sequence id 1
+       // 00 00 00 01
+       data[pkt2ofs+3] = 0x01
+
+       conn.data = data
+       conn.maxReads = 3
+       packet, err := mc.readPacket()
+       if err != nil {
+               t.Fatal(err)
+       }
+       if len(packet) != maxPacketSize {
+               t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet))
+       }
+       if packet[0] != 0x11 {
+               t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
+       }
+       if packet[maxPacketSize-1] != 0x22 {
+               t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1])
+       }
+
+       // case 2: payload has length which is a multiple of maxPacketSize
+       data = data[:cap(data)]
+
+       // 2nd packet now has maxPacketSize length
+       data[pkt2ofs] = 0xff
+       data[pkt2ofs+1] = 0xff
+       data[pkt2ofs+2] = 0xff
+
+       // mark the payload start and end of the 2nd packet
+       data[pkt2ofs+4] = 0x33
+       data[pkt2ofs+maxPacketSize+3] = 0x44
+
+       // 3rd packet has payload length 0 and sequence id 2
+       // 00 00 00 02
+       data[pkt3ofs+3] = 0x02
+
+       conn.data = data
+       conn.reads = 0
+       conn.maxReads = 5
+       mc.sequence = 0
+       packet, err = mc.readPacket()
+       if err != nil {
+               t.Fatal(err)
+       }
+       if len(packet) != 2*maxPacketSize {
+               t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet))
+       }
+       if packet[0] != 0x11 {
+               t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
+       }
+       if packet[2*maxPacketSize-1] != 0x44 {
+               t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1])
+       }
+
+       // case 3: payload has a length larger maxPacketSize, which is not an exact
+       // multiple of it
+       data = data[:pkt2ofs+4+42]
+       data[pkt2ofs] = 0x2a
+       data[pkt2ofs+1] = 0x00
+       data[pkt2ofs+2] = 0x00
+       data[pkt2ofs+4+41] = 0x44
+
+       conn.data = data
+       conn.reads = 0
+       conn.maxReads = 4
+       mc.sequence = 0
+       packet, err = mc.readPacket()
+       if err != nil {
+               t.Fatal(err)
+       }
+       if len(packet) != maxPacketSize+42 {
+               t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet))
+       }
+       if packet[0] != 0x11 {
+               t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
+       }
+       if packet[maxPacketSize+41] != 0x44 {
+               t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41])
+       }
+}
+
+// TestReadPacketFail checks that malformed packets and broken connections
+// all surface as ErrInvalidConn.
+func TestReadPacketFail(t *testing.T) {
+       conn := new(mockConn)
+       mc := &mysqlConn{
+               buf:     newBuffer(conn),
+               closech: make(chan struct{}),
+       }
+
+       // illegal empty (stand-alone) packet
+       conn.data = []byte{0x00, 0x00, 0x00, 0x00}
+       conn.maxReads = 1
+       _, err := mc.readPacket()
+       if err != ErrInvalidConn {
+               t.Errorf("expected ErrInvalidConn, got %v", err)
+       }
+
+       // reset
+       conn.reads = 0
+       mc.sequence = 0
+       mc.buf = newBuffer(conn)
+
+       // fail to read header
+       conn.closed = true
+       _, err = mc.readPacket()
+       if err != ErrInvalidConn {
+               t.Errorf("expected ErrInvalidConn, got %v", err)
+       }
+
+       // reset
+       conn.closed = false
+       conn.reads = 0
+       mc.sequence = 0
+       mc.buf = newBuffer(conn)
+
+       // fail to read body
+       conn.maxReads = 1
+       _, err = mc.readPacket()
+       if err != ErrInvalidConn {
+               t.Errorf("expected ErrInvalidConn, got %v", err)
+       }
+}
+
+// https://github.com/go-sql-driver/mysql/pull/801
+// not-NUL terminated plugin_name in init packet
+func TestRegression801(t *testing.T) {
+       conn := new(mockConn)
+       mc := &mysqlConn{
+               buf:      newBuffer(conn),
+               cfg:      new(Config),
+               sequence: 42,
+               closech:  make(chan struct{}),
+       }
+
+       // a captured server handshake whose plugin name lacks the trailing NUL
+       conn.data = []byte{72, 0, 0, 42, 10, 53, 46, 53, 46, 56, 0, 165, 0, 0, 0,
+               60, 70, 63, 58, 68, 104, 34, 97, 0, 223, 247, 33, 2, 0, 15, 128, 21, 0,
+               0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 120, 114, 47, 85, 75, 109, 99, 51, 77,
+               50, 64, 0, 109, 121, 115, 113, 108, 95, 110, 97, 116, 105, 118, 101, 95,
+               112, 97, 115, 115, 119, 111, 114, 100}
+       conn.maxReads = 1
+
+       authData, pluginName, err := mc.readHandshakePacket()
+       if err != nil {
+               t.Fatalf("got error: %v", err)
+       }
+
+       if pluginName != "mysql_native_password" {
+               t.Errorf("expected plugin name 'mysql_native_password', got '%s'", pluginName)
+       }
+
+       expectedAuthData := []byte{60, 70, 63, 58, 68, 104, 34, 97, 98, 120, 114,
+               47, 85, 75, 109, 99, 51, 77, 50, 64}
+       if !bytes.Equal(authData, expectedAuthData) {
+               t.Errorf("expected authData '%v', got '%v'", expectedAuthData, authData)
+       }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644 (file)
index 0000000..c6438d0
--- /dev/null
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+// mysqlResult implements driver.Result for a completed statement.
+type mysqlResult struct {
+       affectedRows int64
+       insertId     int64
+}
+
+// LastInsertId returns the auto-increment id generated by the statement.
+func (res *mysqlResult) LastInsertId() (int64, error) {
+       return res.insertId, nil
+}
+
+// RowsAffected returns the number of rows changed by the statement.
+func (res *mysqlResult) RowsAffected() (int64, error) {
+       return res.affectedRows, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644 (file)
index 0000000..d3b1e28
--- /dev/null
@@ -0,0 +1,216 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "database/sql/driver"
+       "io"
+       "math"
+       "reflect"
+)
+
+// resultSet holds the column metadata of the current result set and whether
+// it has been fully read from the wire.
+type resultSet struct {
+       columns     []mysqlField
+       columnNames []string // lazily built by Columns
+       done        bool
+}
+
+// mysqlRows is the shared base for text- and binary-protocol row readers.
+type mysqlRows struct {
+       mc     *mysqlConn
+       rs     resultSet
+       finish func() // optional cleanup hook invoked once on Close
+}
+
+// binaryRows reads rows in the binary protocol (prepared statements).
+type binaryRows struct {
+       mysqlRows
+}
+
+// textRows reads rows in the text protocol (plain queries).
+type textRows struct {
+       mysqlRows
+}
+
+// Columns returns the column names of the current result set, optionally
+// prefixed with the table name when ColumnsWithAlias is enabled.
+// The computed slice is cached on the result set.
+func (rows *mysqlRows) Columns() []string {
+       if rows.rs.columnNames != nil {
+               return rows.rs.columnNames
+       }
+
+       columns := make([]string, len(rows.rs.columns))
+       if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+               for i := range columns {
+                       if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+                               columns[i] = tableName + "." + rows.rs.columns[i].name
+                       } else {
+                               columns[i] = rows.rs.columns[i].name
+                       }
+               }
+       } else {
+               for i := range columns {
+                       columns[i] = rows.rs.columns[i].name
+               }
+       }
+
+       rows.rs.columnNames = columns
+       return columns
+}
+
+// ColumnTypeDatabaseTypeName implements driver.RowsColumnTypeDatabaseTypeName.
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+       return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+//     return int64(rows.rs.columns[i].length), true
+// }
+
+// ColumnTypeNullable reports whether column i may contain NULL.
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+       return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+// ColumnTypePrecisionScale reports precision/scale for decimal, temporal and
+// floating-point columns; ok is false for all other types.
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+       column := rows.rs.columns[i]
+       decimals := int64(column.decimals)
+
+       switch column.fieldType {
+       case fieldTypeDecimal, fieldTypeNewDecimal:
+               // column.length counts the sign and (when decimals > 0) the point
+               if decimals > 0 {
+                       return int64(column.length) - 2, decimals, true
+               }
+               return int64(column.length) - 1, decimals, true
+       case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+               return decimals, decimals, true
+       case fieldTypeFloat, fieldTypeDouble:
+               // 0x1f marks "not specified"
+               if decimals == 0x1f {
+                       return math.MaxInt64, math.MaxInt64, true
+               }
+               return math.MaxInt64, decimals, true
+       }
+
+       return 0, 0, false
+}
+
+// ColumnTypeScanType implements driver.RowsColumnTypeScanType.
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+       return rows.rs.columns[i].scanType()
+}
+
+// Close runs the finish hook, drains unread packets so the connection can be
+// reused, and detaches the rows from the connection.
+func (rows *mysqlRows) Close() (err error) {
+       if f := rows.finish; f != nil {
+               f()
+               rows.finish = nil
+       }
+
+       mc := rows.mc
+       if mc == nil {
+               return nil
+       }
+       if err := mc.error(); err != nil {
+               return err
+       }
+
+       // Remove unread packets from stream
+       if !rows.rs.done {
+               err = mc.readUntilEOF()
+       }
+       if err == nil {
+               if err = mc.discardResults(); err != nil {
+                       return err
+               }
+       }
+
+       rows.mc = nil
+       return err
+}
+
+// HasNextResultSet reports whether the server announced more result sets.
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+       if rows.mc == nil {
+               return false
+       }
+       return rows.mc.status&statusMoreResultsExists != 0
+}
+
+// nextResultSet drains the current result set and advances to the next one,
+// returning its column count. It returns io.EOF when no further set exists.
+func (rows *mysqlRows) nextResultSet() (int, error) {
+       if rows.mc == nil {
+               return 0, io.EOF
+       }
+       if err := rows.mc.error(); err != nil {
+               return 0, err
+       }
+
+       // Remove unread packets from stream
+       if !rows.rs.done {
+               if err := rows.mc.readUntilEOF(); err != nil {
+                       return 0, err
+               }
+               rows.rs.done = true
+       }
+
+       if !rows.HasNextResultSet() {
+               rows.mc = nil
+               return 0, io.EOF
+       }
+       rows.rs = resultSet{}
+       return rows.mc.readResultSetHeaderPacket()
+}
+
+// nextNotEmptyResultSet advances until it reaches a result set with at least
+// one column, skipping empty (OK-only) sets.
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+       for {
+               resLen, err := rows.nextResultSet()
+               if err != nil {
+                       return 0, err
+               }
+
+               if resLen > 0 {
+                       return resLen, nil
+               }
+
+               // empty set: mark done and keep searching
+               rows.rs.done = true
+       }
+}
+
+// NextResultSet advances to the next non-empty result set and reads its
+// column definitions (binary protocol).
+func (rows *binaryRows) NextResultSet() error {
+       resLen, err := rows.nextNotEmptyResultSet()
+       if err != nil {
+               return err
+       }
+
+       rows.rs.columns, err = rows.mc.readColumns(resLen)
+       return err
+}
+
+// Next fetches the next row into dest, or returns io.EOF when the rows are
+// exhausted or the connection has been detached.
+func (rows *binaryRows) Next(dest []driver.Value) error {
+       if mc := rows.mc; mc != nil {
+               if err := mc.error(); err != nil {
+                       return err
+               }
+
+               // Fetch next row from stream
+               return rows.readRow(dest)
+       }
+       return io.EOF
+}
+
+// NextResultSet advances to the next non-empty result set and reads its
+// column definitions (text protocol).
+func (rows *textRows) NextResultSet() (err error) {
+       resLen, err := rows.nextNotEmptyResultSet()
+       if err != nil {
+               return err
+       }
+
+       rows.rs.columns, err = rows.mc.readColumns(resLen)
+       return err
+}
+
+// Next fetches the next row into dest, or returns io.EOF when the rows are
+// exhausted or the connection has been detached.
+func (rows *textRows) Next(dest []driver.Value) error {
+       if mc := rows.mc; mc != nil {
+               if err := mc.error(); err != nil {
+                       return err
+               }
+
+               // Fetch next row from stream
+               return rows.readRow(dest)
+       }
+       return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644 (file)
index 0000000..ce7fe4c
--- /dev/null
@@ -0,0 +1,211 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "database/sql/driver"
+       "fmt"
+       "io"
+       "reflect"
+       "strconv"
+)
+
+// mysqlStmt is the driver.Stmt implementation for a server-side prepared
+// statement.
+type mysqlStmt struct {
+       mc         *mysqlConn // owning connection; nil once Close has run
+       id         uint32     // statement id assigned by the server at prepare time
+       paramCount int        // number of '?' placeholders, reported via NumInput
+}
+
+// Close deallocates the prepared statement on the server (COM_STMT_CLOSE) and
+// detaches it from its connection.
+func (stmt *mysqlStmt) Close() error {
+       if stmt.mc == nil || stmt.mc.closed.IsSet() {
+               // driver.Stmt.Close can be called more than once, thus this function
+               // has to be idempotent.
+               // See also Issue #450 and golang/go#16019.
+               //errLog.Print(ErrInvalidConn)
+               return driver.ErrBadConn
+       }
+
+       err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+       stmt.mc = nil
+       return err
+}
+
+// NumInput reports the number of placeholder parameters.
+func (stmt *mysqlStmt) NumInput() int {
+       return stmt.paramCount
+}
+
+// ColumnConverter returns the driver-specific value converter; the same
+// stateless converter is used for every column index.
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+       return converter{}
+}
+
+// Exec executes the prepared statement with the given arguments and returns
+// the affected-rows / last-insert-id result. Any result set the server sends
+// back is drained and discarded.
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+       if stmt.mc.closed.IsSet() {
+               errLog.Print(ErrInvalidConn)
+               return nil, driver.ErrBadConn
+       }
+       // Send command
+       err := stmt.writeExecutePacket(args)
+       if err != nil {
+               return nil, stmt.mc.markBadConn(err)
+       }
+
+       mc := stmt.mc
+
+       // Reset counters so the OK packet(s) read below accumulate fresh values.
+       mc.affectedRows = 0
+       mc.insertId = 0
+
+       // Read Result
+       resLen, err := mc.readResultSetHeaderPacket()
+       if err != nil {
+               return nil, err
+       }
+
+       if resLen > 0 {
+               // Columns
+               if err = mc.readUntilEOF(); err != nil {
+                       return nil, err
+               }
+
+               // Rows
+               if err := mc.readUntilEOF(); err != nil {
+                       return nil, err
+               }
+       }
+
+       // Drain any additional result sets (multi-statement / stored procedures).
+       if err := mc.discardResults(); err != nil {
+               return nil, err
+       }
+
+       return &mysqlResult{
+               affectedRows: int64(mc.affectedRows),
+               insertId:     int64(mc.insertId),
+       }, nil
+}
+
+// Query executes the prepared statement and returns its rows.
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+       return stmt.query(args)
+}
+
+// query executes the prepared statement and returns binaryRows positioned at
+// the first result set. If the first result set is empty it immediately
+// advances to the first non-empty one (io.EOF there means "no rows" and is
+// not an error).
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+       if stmt.mc.closed.IsSet() {
+               errLog.Print(ErrInvalidConn)
+               return nil, driver.ErrBadConn
+       }
+       // Send command
+       err := stmt.writeExecutePacket(args)
+       if err != nil {
+               return nil, stmt.mc.markBadConn(err)
+       }
+
+       mc := stmt.mc
+
+       // Read Result
+       resLen, err := mc.readResultSetHeaderPacket()
+       if err != nil {
+               return nil, err
+       }
+
+       rows := new(binaryRows)
+
+       if resLen > 0 {
+               rows.mc = mc
+               rows.rs.columns, err = mc.readColumns(resLen)
+       } else {
+               rows.rs.done = true
+
+               switch err := rows.NextResultSet(); err {
+               case nil, io.EOF:
+                       return rows, nil
+               default:
+                       return nil, err
+               }
+       }
+
+       return rows, err
+}
+
+// converter implements driver.ValueConverter for statement arguments.
+type converter struct{}
+
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception.  We support uint64 with their high bit and the default
+// implementation does not.  This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+       if driver.IsValue(v) {
+               return v, nil
+       }
+
+       if vr, ok := v.(driver.Valuer); ok {
+               sv, err := callValuerValue(vr)
+               if err != nil {
+                       return nil, err
+               }
+               if !driver.IsValue(sv) {
+                       return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+               }
+               return sv, nil
+       }
+
+       rv := reflect.ValueOf(v)
+       switch rv.Kind() {
+       case reflect.Ptr:
+               // indirect pointers
+               if rv.IsNil() {
+                       return nil, nil
+               } else {
+                       return c.ConvertValue(rv.Elem().Interface())
+               }
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return rv.Int(), nil
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+               return int64(rv.Uint()), nil
+       case reflect.Uint64:
+               u64 := rv.Uint()
+               if u64 >= 1<<63 {
+                       // The deliberate difference: values that do not fit int64
+                       // are passed on as their decimal string representation.
+                       return strconv.FormatUint(u64, 10), nil
+               }
+               return int64(u64), nil
+       case reflect.Float32, reflect.Float64:
+               return rv.Float(), nil
+       case reflect.Bool:
+               return rv.Bool(), nil
+       case reflect.Slice:
+               ek := rv.Type().Elem().Kind()
+               if ek == reflect.Uint8 {
+                       return rv.Bytes(), nil
+               }
+               return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
+       case reflect.String:
+               return rv.String(), nil
+       }
+       return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
+
+// valuerReflectType caches the reflect.Type of driver.Valuer for the nil-pointer
+// check in callValuerValue.
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+       if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+               rv.IsNil() &&
+               rv.Type().Elem().Implements(valuerReflectType) {
+               return nil, nil
+       }
+       return vr.Value()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement_test.go b/vendor/github.com/go-sql-driver/mysql/statement_test.go
new file mode 100644 (file)
index 0000000..98a6c19
--- /dev/null
@@ -0,0 +1,126 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "testing"
+)
+
+// TestConvertDerivedString verifies that a named type with string underlying
+// type is converted to a plain string.
+func TestConvertDerivedString(t *testing.T) {
+       type derived string
+
+       output, err := converter{}.ConvertValue(derived("value"))
+       if err != nil {
+               t.Fatal("Derived string type not convertible", err)
+       }
+
+       if output != "value" {
+               t.Fatalf("Derived string type not converted, got %#v %T", output, output)
+       }
+}
+
+// TestConvertDerivedByteSlice verifies that a named []uint8 type converts to
+// []byte.
+func TestConvertDerivedByteSlice(t *testing.T) {
+       type derived []uint8
+
+       output, err := converter{}.ConvertValue(derived("value"))
+       if err != nil {
+               t.Fatal("Byte slice not convertible", err)
+       }
+
+       if bytes.Compare(output.([]byte), []byte("value")) != 0 {
+               t.Fatalf("Byte slice not converted, got %#v %T", output, output)
+       }
+}
+
+// TestConvertDerivedUnsupportedSlice verifies that slices of non-byte element
+// types are rejected with a descriptive error.
+func TestConvertDerivedUnsupportedSlice(t *testing.T) {
+       type derived []int
+
+       _, err := converter{}.ConvertValue(derived{1})
+       if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
+               t.Fatal("Unexpected error", err)
+       }
+}
+
+// TestConvertDerivedBool verifies that a named bool type converts to bool.
+func TestConvertDerivedBool(t *testing.T) {
+       type derived bool
+
+       output, err := converter{}.ConvertValue(derived(true))
+       if err != nil {
+               t.Fatal("Derived bool type not convertible", err)
+       }
+
+       if output != true {
+               t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
+       }
+}
+
+// TestConvertPointer verifies that pointers are dereferenced before conversion.
+func TestConvertPointer(t *testing.T) {
+       str := "value"
+
+       output, err := converter{}.ConvertValue(&str)
+       if err != nil {
+               t.Fatal("Pointer type not convertible", err)
+       }
+
+       if output != "value" {
+               t.Fatalf("Pointer type not converted, got %#v %T", output, output)
+       }
+}
+
+// TestConvertSignedIntegers verifies that all signed integer widths convert
+// to int64.
+func TestConvertSignedIntegers(t *testing.T) {
+       values := []interface{}{
+               int8(-42),
+               int16(-42),
+               int32(-42),
+               int64(-42),
+               int(-42),
+       }
+
+       for _, value := range values {
+               output, err := converter{}.ConvertValue(value)
+               if err != nil {
+                       t.Fatalf("%T type not convertible %s", value, err)
+               }
+
+               if output != int64(-42) {
+                       t.Fatalf("%T type not converted, got %#v %T", value, output, output)
+               }
+       }
+}
+
+// TestConvertUnsignedIntegers verifies that unsigned integers convert to
+// int64, and that a uint64 with its high bit set converts to its decimal
+// string representation (the converter's one deliberate deviation from
+// database/sql's default converter).
+func TestConvertUnsignedIntegers(t *testing.T) {
+       values := []interface{}{
+               uint8(42),
+               uint16(42),
+               uint32(42),
+               uint64(42),
+               uint(42),
+       }
+
+       for _, value := range values {
+               output, err := converter{}.ConvertValue(value)
+               if err != nil {
+                       t.Fatalf("%T type not convertible %s", value, err)
+               }
+
+               if output != int64(42) {
+                       t.Fatalf("%T type not converted, got %#v %T", value, output, output)
+               }
+       }
+
+       output, err := converter{}.ConvertValue(^uint64(0))
+       if err != nil {
+               t.Fatal("uint64 high-bit not convertible", err)
+       }
+
+       if output != "18446744073709551615" {
+               t.Fatalf("uint64 high-bit not converted, got %#v %T", output, output)
+       }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644 (file)
index 0000000..417d727
--- /dev/null
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+// mysqlTx is the driver.Tx implementation. It simply issues COMMIT/ROLLBACK
+// on its connection and then detaches from it.
+type mysqlTx struct {
+       mc *mysqlConn // nil after Commit or Rollback
+}
+
+// Commit sends COMMIT and invalidates the transaction handle (idempotence:
+// a second call returns ErrInvalidConn).
+func (tx *mysqlTx) Commit() (err error) {
+       if tx.mc == nil || tx.mc.closed.IsSet() {
+               return ErrInvalidConn
+       }
+       err = tx.mc.exec("COMMIT")
+       tx.mc = nil
+       return
+}
+
+// Rollback sends ROLLBACK and invalidates the transaction handle.
+func (tx *mysqlTx) Rollback() (err error) {
+       if tx.mc == nil || tx.mc.closed.IsSet() {
+               return ErrInvalidConn
+       }
+       err = tx.mc.exec("ROLLBACK")
+       tx.mc = nil
+       return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644 (file)
index 0000000..cb3650b
--- /dev/null
@@ -0,0 +1,755 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "crypto/tls"
+       "database/sql"
+       "database/sql/driver"
+       "encoding/binary"
+       "errors"
+       "fmt"
+       "io"
+       "strconv"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "time"
+)
+
+// Registry for custom tls.Configs, keyed by the name used in the DSN's
+// tls=<name> parameter. Guarded by tlsConfigLock; lazily allocated on first
+// RegisterTLSConfig call.
+var (
+       tlsConfigLock     sync.RWMutex
+       tlsConfigRegistry map[string]*tls.Config
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
+//  rootCertPool := x509.NewCertPool()
+//  pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+//  if err != nil {
+//      log.Fatal(err)
+//  }
+//  if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+//      log.Fatal("Failed to append PEM.")
+//  }
+//  clientCert := make([]tls.Certificate, 0, 1)
+//  certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+//  if err != nil {
+//      log.Fatal(err)
+//  }
+//  clientCert = append(clientCert, certs)
+//  mysql.RegisterTLSConfig("custom", &tls.Config{
+//      RootCAs: rootCertPool,
+//      Certificates: clientCert,
+//  })
+//  db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+//
+func RegisterTLSConfig(key string, config *tls.Config) error {
+       // Keys that parse as booleans or "skip-verify" collide with the built-in
+       // tls= DSN values and are therefore reserved.
+       if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
+               return fmt.Errorf("key '%s' is reserved", key)
+       }
+
+       tlsConfigLock.Lock()
+       if tlsConfigRegistry == nil {
+               tlsConfigRegistry = make(map[string]*tls.Config)
+       }
+
+       tlsConfigRegistry[key] = config
+       tlsConfigLock.Unlock()
+       return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+       tlsConfigLock.Lock()
+       if tlsConfigRegistry != nil {
+               delete(tlsConfigRegistry, key)
+       }
+       tlsConfigLock.Unlock()
+}
+
+// getTLSConfigClone returns a clone of the registered tls.Config for key, or
+// nil if no config is registered under that key. Cloning keeps the registry's
+// copy immutable from the caller's perspective.
+func getTLSConfigClone(key string) (config *tls.Config) {
+       tlsConfigLock.RLock()
+       if v, ok := tlsConfigRegistry[key]; ok {
+               config = v.Clone()
+       }
+       tlsConfigLock.RUnlock()
+       return
+}
+
+// Returns the bool value of the input.
+// The 2nd return value indicates if the input was a valid bool value
+func readBool(input string) (value bool, valid bool) {
+       switch input {
+       case "1", "true", "TRUE", "True":
+               return true, true
+       case "0", "false", "FALSE", "False":
+               return false, true
+       }
+
+       // Not a valid bool value
+       return
+}
+
+/******************************************************************************
+*                           Time related utils                                *
+******************************************************************************/
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+//  var nt NullTime
+//  err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+//  ...
+//  if nt.Valid {
+//     // use nt.Time
+//  } else {
+//     // NULL value
+//  }
+//
+// This NullTime implementation is not driver-specific
+type NullTime struct {
+       Time  time.Time
+       Valid bool // Valid is true if Time is not NULL
+}
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+// String/byte inputs are parsed as UTC; parse failures leave Valid == false
+// and the parse error is returned.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+       if value == nil {
+               nt.Time, nt.Valid = time.Time{}, false
+               return
+       }
+
+       switch v := value.(type) {
+       case time.Time:
+               nt.Time, nt.Valid = v, true
+               return
+       case []byte:
+               nt.Time, err = parseDateTime(string(v), time.UTC)
+               nt.Valid = (err == nil)
+               return
+       case string:
+               nt.Time, err = parseDateTime(v, time.UTC)
+               nt.Valid = (err == nil)
+               return
+       }
+
+       nt.Valid = false
+       return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+       if !nt.Valid {
+               return nil, nil
+       }
+       return nt.Time, nil
+}
+
+// parseDateTime parses a MySQL text-protocol DATE / DATETIME string into a
+// time.Time in loc. The all-zero date ("0000-00-00 ...") is returned as the
+// zero time.Time without error; lengths other than the valid prefixes of
+// "YYYY-MM-DD HH:MM:SS.MMMMMM" are rejected.
+func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
+       base := "0000-00-00 00:00:00.0000000"
+       switch len(str) {
+       case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+               // Zero-value dates match the corresponding prefix of base and map
+               // to the zero time.Time.
+               if str == base[:len(str)] {
+                       return
+               }
+               t, err = time.Parse(timeFormat[:len(str)], str)
+       default:
+               err = fmt.Errorf("invalid time string: %s", str)
+               return
+       }
+
+       // Adjust location
+       if err == nil && loc != time.UTC {
+               y, mo, d := t.Date()
+               h, mi, s := t.Clock()
+               t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
+       }
+
+       return
+}
+
+// parseBinaryDateTime decodes a binary-protocol DATETIME value. num is the
+// payload length: 0 = zero value, 4 = date only, 7 = date+time,
+// 11 = date+time+microseconds (stored as a little-endian uint32 of
+// microseconds, converted here to nanoseconds via *1000).
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+       switch num {
+       case 0:
+               return time.Time{}, nil
+       case 4:
+               return time.Date(
+                       int(binary.LittleEndian.Uint16(data[:2])), // year
+                       time.Month(data[2]),                       // month
+                       int(data[3]),                              // day
+                       0, 0, 0, 0,
+                       loc,
+               ), nil
+       case 7:
+               return time.Date(
+                       int(binary.LittleEndian.Uint16(data[:2])), // year
+                       time.Month(data[2]),                       // month
+                       int(data[3]),                              // day
+                       int(data[4]),                              // hour
+                       int(data[5]),                              // minutes
+                       int(data[6]),                              // seconds
+                       0,
+                       loc,
+               ), nil
+       case 11:
+               return time.Date(
+                       int(binary.LittleEndian.Uint16(data[:2])), // year
+                       time.Month(data[2]),                       // month
+                       int(data[3]),                              // day
+                       int(data[4]),                              // hour
+                       int(data[5]),                              // minutes
+                       int(data[6]),                              // seconds
+                       int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+                       loc,
+               ), nil
+       }
+       return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+// appendMicrosecs appends '.' plus the first `decimals` fractional-second
+// digits to dst. src is the 4-byte little-endian microseconds field (or empty,
+// in which case zero digits are appended). decimals > 5 falls through to the
+// default case and emits all six digits.
+func appendMicrosecs(dst, src []byte, decimals int) []byte {
+       if decimals <= 0 {
+               return dst
+       }
+       if len(src) == 0 {
+               return append(dst, ".000000"[:decimals+1]...)
+       }
+
+       // Split the microseconds value into three two-digit pairs for table
+       // lookup in digits10/digits01.
+       microsecs := binary.LittleEndian.Uint32(src[:4])
+       p1 := byte(microsecs / 10000)
+       microsecs -= 10000 * uint32(p1)
+       p2 := byte(microsecs / 100)
+       microsecs -= 100 * uint32(p2)
+       p3 := byte(microsecs)
+
+       switch decimals {
+       default:
+               return append(dst, '.',
+                       digits10[p1], digits01[p1],
+                       digits10[p2], digits01[p2],
+                       digits10[p3], digits01[p3],
+               )
+       case 1:
+               return append(dst, '.',
+                       digits10[p1],
+               )
+       case 2:
+               return append(dst, '.',
+                       digits10[p1], digits01[p1],
+               )
+       case 3:
+               return append(dst, '.',
+                       digits10[p1], digits01[p1],
+                       digits10[p2],
+               )
+       case 4:
+               return append(dst, '.',
+                       digits10[p1], digits01[p1],
+                       digits10[p2], digits01[p2],
+               )
+       case 5:
+               return append(dst, '.',
+                       digits10[p1], digits01[p1],
+                       digits10[p2], digits01[p2],
+                       digits10[p3],
+               )
+       }
+}
+
+// formatBinaryDateTime renders a binary-protocol DATE/DATETIME payload (4, 7
+// or 11 bytes; empty = zero value) as a []byte string of the requested
+// length (10 for DATE, 19-26 for DATETIME with fractional seconds).
+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
+       // length expects the deterministic length of the zero value,
+       // negative time and 100+ hours are automatically added if needed
+       if len(src) == 0 {
+               return zeroDateTime[:length], nil
+       }
+       var dst []byte      // return value
+       var p1, p2, p3 byte // current digit pair
+
+       switch length {
+       case 10, 19, 21, 22, 23, 24, 25, 26:
+       default:
+               t := "DATE"
+               if length > 10 {
+                       t += "TIME"
+               }
+               return nil, fmt.Errorf("illegal %s length %d", t, length)
+       }
+       switch len(src) {
+       case 4, 7, 11:
+       default:
+               t := "DATE"
+               if length > 10 {
+                       t += "TIME"
+               }
+               return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+       }
+       dst = make([]byte, 0, length)
+       // start with the date
+       year := binary.LittleEndian.Uint16(src[:2])
+       pt := year / 100
+       p1 = byte(year - 100*uint16(pt))
+       p2, p3 = src[2], src[3]
+       dst = append(dst,
+               digits10[pt], digits01[pt],
+               digits10[p1], digits01[p1], '-',
+               digits10[p2], digits01[p2], '-',
+               digits10[p3], digits01[p3],
+       )
+       if length == 10 {
+               return dst, nil
+       }
+       if len(src) == 4 {
+               // Date-only payload but DATETIME length requested: pad with the
+               // zero time-of-day portion.
+               return append(dst, zeroDateTime[10:length]...), nil
+       }
+       dst = append(dst, ' ')
+       p1 = src[4] // hour
+       src = src[5:]
+
+       // p1 is 2-digit hour, src is after hour
+       p2, p3 = src[0], src[1]
+       dst = append(dst,
+               digits10[p1], digits01[p1], ':',
+               digits10[p2], digits01[p2], ':',
+               digits10[p3], digits01[p3],
+       )
+       return appendMicrosecs(dst, src[2:], int(length)-20), nil
+}
+
+// formatBinaryTime renders a binary-protocol TIME payload (8 or 12 bytes;
+// empty = zero value) as a []byte string. src[0] is the sign flag, src[1:5]
+// is a little-endian day count folded into the hour field, so values can be
+// negative and exceed 99 hours.
+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+       // length expects the deterministic length of the zero value,
+       // negative time and 100+ hours are automatically added if needed
+       if len(src) == 0 {
+               return zeroDateTime[11 : 11+length], nil
+       }
+       var dst []byte // return value
+
+       switch length {
+       case
+               8,                      // time (can be up to 10 when negative and 100+ hours)
+               10, 11, 12, 13, 14, 15: // time with fractional seconds
+       default:
+               return nil, fmt.Errorf("illegal TIME length %d", length)
+       }
+       switch len(src) {
+       case 8, 12:
+       default:
+               return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+       }
+       // +2 to enable negative time and 100+ hours
+       dst = make([]byte, 0, length+2)
+       if src[0] == 1 {
+               dst = append(dst, '-')
+       }
+       days := binary.LittleEndian.Uint32(src[1:5])
+       hours := int64(days)*24 + int64(src[5])
+
+       if hours >= 100 {
+               dst = strconv.AppendInt(dst, hours, 10)
+       } else {
+               dst = append(dst, digits10[hours], digits01[hours])
+       }
+
+       min, sec := src[6], src[7]
+       dst = append(dst, ':',
+               digits10[min], digits01[min], ':',
+               digits10[sec], digits01[sec],
+       )
+       return appendMicrosecs(dst, src[8:], int(length)-9), nil
+}
+
+/******************************************************************************
+*                       Convert from and to bytes                             *
+******************************************************************************/
+
+// uint64ToBytes encodes n as 8 little-endian bytes.
+func uint64ToBytes(n uint64) []byte {
+       return []byte{
+               byte(n),
+               byte(n >> 8),
+               byte(n >> 16),
+               byte(n >> 24),
+               byte(n >> 32),
+               byte(n >> 40),
+               byte(n >> 48),
+               byte(n >> 56),
+       }
+}
+
+// uint64ToString renders n in decimal, filling a fixed 20-byte array from the
+// right (20 digits is the maximum for a uint64).
+func uint64ToString(n uint64) []byte {
+       var a [20]byte
+       i := 20
+
+       // U+0030 = 0
+       // ...
+       // U+0039 = 9
+
+       var q uint64
+       for n >= 10 {
+               i--
+               q = n / 10
+               a[i] = uint8(n-q*10) + 0x30
+               n = q
+       }
+
+       i--
+       a[i] = uint8(n) + 0x30
+
+       return a[i:]
+}
+
+// treats string value as unsigned integer representation
+// (no validation: assumes every byte is an ASCII digit)
+func stringToInt(b []byte) int {
+       val := 0
+       for i := range b {
+               val *= 10
+               val += int(b[i] - 0x30)
+       }
+       return val
+}
+
+// returns the string read as a bytes slice, wheter the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+// readLengthEncodedString reads a length-encoded string from the start of b.
+// It returns the string bytes (a capacity-limited sub-slice of b), whether
+// the value is NULL, the total number of bytes consumed, and io.EOF if b is
+// shorter than the announced length.
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+       // Get length
+       num, isNull, n := readLengthEncodedInteger(b)
+       if num < 1 {
+               return b[n:n], isNull, n, nil
+       }
+
+       n += int(num)
+
+       // Check data length
+       if len(b) >= n {
+               // Three-index slice caps the result so callers appending to it
+               // cannot overwrite the rest of b.
+               return b[n-int(num) : n : n], false, n, nil
+       }
+       return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+       // Get length
+       num, _, n := readLengthEncodedInteger(b)
+       if num < 1 {
+               return n, nil
+       }
+
+       n += int(num)
+
+       // Check data length
+       if len(b) >= n {
+               return n, nil
+       }
+       return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+// readLengthEncodedInteger decodes a MySQL length-encoded integer from the
+// start of b, returning the value, whether it is NULL (0xfb marker or empty
+// input) and the number of bytes consumed.
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+       // See issue #349
+       if len(b) == 0 {
+               return 0, true, 1
+       }
+
+       switch b[0] {
+       // 251: NULL
+       case 0xfb:
+               return 0, true, 1
+
+       // 252: value of following 2
+       case 0xfc:
+               return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+       // 253: value of following 3
+       case 0xfd:
+               return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+       // 254: value of following 8
+       case 0xfe:
+               return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+                               uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+                               uint64(b[7])<<48 | uint64(b[8])<<56,
+                       false, 9
+       }
+
+       // 0-250: value of first byte
+       return uint64(b[0]), false, 1
+}
+
+// encodes a uint64 value and appends it to the given bytes slice
+// using the shortest MySQL length-encoded-integer form that fits n
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+       switch {
+       case n <= 250:
+               return append(b, byte(n))
+
+       case n <= 0xffff:
+               return append(b, 0xfc, byte(n), byte(n>>8))
+
+       case n <= 0xffffff:
+               return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+       }
+       return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+               byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, reallocate new buffer.
+// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, reallocate new buffer.
+// The returned slice has length len(buf)+appendSize; callers write into the
+// newly exposed tail directly.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+       newSize := len(buf) + appendSize
+       if cap(buf) < newSize {
+               // Grow buffer exponentially
+               newBuf := make([]byte, len(buf)*2+appendSize)
+               copy(newBuf, buf)
+               buf = newBuf
+       }
+       return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+// The escaped output is appended to buf; len(v)*2 is the worst case (every
+// byte escaped), which reserveBuffer pre-allocates.
+func escapeBytesBackslash(buf, v []byte) []byte {
+       pos := len(buf)
+       buf = reserveBuffer(buf, len(v)*2)
+
+       for _, c := range v {
+               switch c {
+               case '\x00':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '0'
+                       pos += 2
+               case '\n':
+                       buf[pos] = '\\'
+                       buf[pos+1] = 'n'
+                       pos += 2
+               case '\r':
+                       buf[pos] = '\\'
+                       buf[pos+1] = 'r'
+                       pos += 2
+               case '\x1a':
+                       buf[pos] = '\\'
+                       buf[pos+1] = 'Z'
+                       pos += 2
+               case '\'':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '\''
+                       pos += 2
+               case '"':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '"'
+                       pos += 2
+               case '\\':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '\\'
+                       pos += 2
+               default:
+                       buf[pos] = c
+                       pos++
+               }
+       }
+
+       return buf[:pos]
+}
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+// Indexing byte-by-byte (rather than ranging over runes) is intentional: the
+// escaping operates on raw bytes.
+func escapeStringBackslash(buf []byte, v string) []byte {
+       pos := len(buf)
+       buf = reserveBuffer(buf, len(v)*2)
+
+       for i := 0; i < len(v); i++ {
+               c := v[i]
+               switch c {
+               case '\x00':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '0'
+                       pos += 2
+               case '\n':
+                       buf[pos] = '\\'
+                       buf[pos+1] = 'n'
+                       pos += 2
+               case '\r':
+                       buf[pos] = '\\'
+                       buf[pos+1] = 'r'
+                       pos += 2
+               case '\x1a':
+                       buf[pos] = '\\'
+                       buf[pos+1] = 'Z'
+                       pos += 2
+               case '\'':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '\''
+                       pos += 2
+               case '"':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '"'
+                       pos += 2
+               case '\\':
+                       buf[pos] = '\\'
+                       buf[pos+1] = '\\'
+                       pos += 2
+               default:
+                       buf[pos] = c
+                       pos++
+               }
+       }
+
+       return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+       pos := len(buf)
+       buf = reserveBuffer(buf, len(v)*2)
+
+       for _, c := range v {
+               if c == '\'' {
+                       buf[pos] = '\''
+                       buf[pos+1] = '\''
+                       pos += 2
+               } else {
+                       buf[pos] = c
+                       pos++
+               }
+       }
+
+       return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+       pos := len(buf)
+       buf = reserveBuffer(buf, len(v)*2)
+
+       for i := 0; i < len(v); i++ {
+               c := v[i]
+               if c == '\'' {
+                       buf[pos] = '\''
+                       buf[pos+1] = '\''
+                       pos += 2
+               } else {
+                       buf[pos] = c
+                       pos++
+               }
+       }
+
+       return buf[:pos]
+}
+
+/******************************************************************************
+*                               Sync utils                                    *
+******************************************************************************/
+
// noCopy may be embedded into structs which must not be copied
// after the first use.
//
// See https://github.com/golang/go/issues/8005#issuecomment-190753527
// for details.
type noCopy struct{}

// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock() {}

// atomicBool is a wrapper around uint32 for usage as a boolean value with
// atomic access. The zero value is false.
type atomicBool struct {
	_noCopy noCopy
	value   uint32
}

// IsSet returns whether the current boolean value is true
func (ab *atomicBool) IsSet() bool {
	return atomic.LoadUint32(&ab.value) > 0
}

// Set sets the value of the bool regardless of the previous value
func (ab *atomicBool) Set(value bool) {
	if value {
		atomic.StoreUint32(&ab.value, 1)
	} else {
		atomic.StoreUint32(&ab.value, 0)
	}
}

// TrySet sets the value of the bool and returns whether the value changed
func (ab *atomicBool) TrySet(value bool) bool {
	if value {
		// Swap returns the previous value; the set changed it iff it was 0.
		return atomic.SwapUint32(&ab.value, 1) == 0
	}
	return atomic.SwapUint32(&ab.value, 0) > 0
}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+       _noCopy noCopy
+       value   atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+       ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+       if v := ae.value.Load(); v != nil {
+               // this will panic if the value doesn't implement the error interface
+               return v.(error)
+       }
+       return nil
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+       dargs := make([]driver.Value, len(named))
+       for n, param := range named {
+               if len(param.Name) > 0 {
+                       // TODO: support the use of Named Parameters #561
+                       return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+               }
+               dargs[n] = param.Value
+       }
+       return dargs, nil
+}
+
// mapIsolationLevel translates a driver.IsolationLevel into the SQL
// keyword MySQL expects, or errors for levels MySQL does not support.
func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
	switch sql.IsolationLevel(level) {
	case sql.LevelReadUncommitted:
		return "READ UNCOMMITTED", nil
	case sql.LevelReadCommitted:
		return "READ COMMITTED", nil
	case sql.LevelRepeatableRead:
		return "REPEATABLE READ", nil
	case sql.LevelSerializable:
		return "SERIALIZABLE", nil
	}
	return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go
new file mode 100644 (file)
index 0000000..8951a7a
--- /dev/null
@@ -0,0 +1,334 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+       "bytes"
+       "database/sql"
+       "database/sql/driver"
+       "encoding/binary"
+       "testing"
+       "time"
+)
+
// TestScanNullTime exercises NullTime.Scan with every accepted source type
// (time.Time, string, []byte) and with inputs that must be rejected.
// tDate/sDate/tDateTime/... are shared fixtures declared elsewhere in this
// test package.
func TestScanNullTime(t *testing.T) {
	var scanTests = []struct {
		in    interface{} // value handed to Scan
		error bool        // expect Scan to return an error
		valid bool        // expected nt.Valid after Scan
		time  time.Time   // expected nt.Time after Scan
	}{
		{tDate, false, true, tDate},
		{sDate, false, true, tDate},
		{[]byte(sDate), false, true, tDate},
		{tDateTime, false, true, tDateTime},
		{sDateTime, false, true, tDateTime},
		{[]byte(sDateTime), false, true, tDateTime},
		{tDate0, false, true, tDate0},
		{sDate0, false, true, tDate0},
		{[]byte(sDate0), false, true, tDate0},
		{sDateTime0, false, true, tDate0},
		{[]byte(sDateTime0), false, true, tDate0},
		// Unparseable strings and unsupported types must fail and leave
		// the destination invalid.
		{"", true, false, tDate0},
		{"1234", true, false, tDate0},
		{0, true, false, tDate0},
	}

	// nt is reused across cases; Scan must fully overwrite its state.
	var nt = NullTime{}
	var err error

	for _, tst := range scanTests {
		err = nt.Scan(tst.in)
		if (err != nil) != tst.error {
			t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
		}
		if nt.Valid != tst.valid {
			t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
		}
		if nt.Time != tst.time {
			t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
		}
	}
}
+
// TestLengthEncodedInteger round-trips MySQL length-encoded integers:
// decode each wire encoding with readLengthEncodedInteger, then re-encode
// with appendLengthEncodedInteger and compare against the original bytes.
// Marker bytes: <0xfb inline, 0xfc = 2-byte, 0xfd = 3-byte, 0xfe = 8-byte.
func TestLengthEncodedInteger(t *testing.T) {
	var integerTests = []struct {
		num     uint64 // decoded value
		encoded []byte // canonical wire encoding
	}{
		{0x0000000000000000, []byte{0x00}},
		{0x0000000000000012, []byte{0x12}},
		{0x00000000000000fa, []byte{0xfa}},
		{0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
		{0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
		{0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
		{0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
		{0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
		{0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
		{0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
		{0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
		{0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
	}

	for _, tst := range integerTests {
		num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
		if isNull {
			t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
		}
		if num != tst.num {
			t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
		}
		if numLen != len(tst.encoded) {
			t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
		}
		// Re-encode and require the exact canonical byte sequence back.
		encoded := appendLengthEncodedInteger(nil, num)
		if !bytes.Equal(encoded, tst.encoded) {
			t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
		}
	}
}
+
// TestFormatBinaryDateTime feeds a hand-built MySQL binary DATETIME value
// (little-endian year, then month/day/hour/min/sec bytes, then little-endian
// microseconds) to formatBinaryDateTime at several input/output lengths and
// checks the rendered text, including the all-zero "0000-00-00" cases.
func TestFormatBinaryDateTime(t *testing.T) {
	rawDate := [11]byte{}
	binary.LittleEndian.PutUint16(rawDate[:2], 1978)   // years
	rawDate[2] = 12                                    // months
	rawDate[3] = 30                                    // days
	rawDate[4] = 15                                    // hours
	rawDate[5] = 46                                    // minutes
	rawDate[6] = 23                                    // seconds
	binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
	// expect runs one case: inlen selects how much of rawDate is passed in,
	// outlen selects the width of the formatted result.
	expect := func(expected string, inlen, outlen uint8) {
		actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen)
		bytes, ok := actual.([]byte)
		if !ok {
			t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
		}
		if string(bytes) != expected {
			t.Errorf(
				"expected %q, got %q for length in %d, out %d",
				expected, actual, inlen, outlen,
			)
		}
	}
	expect("0000-00-00", 0, 10)
	expect("0000-00-00 00:00:00", 0, 19)
	expect("1978-12-30", 4, 10)
	expect("1978-12-30 15:46:23", 7, 19)
	expect("1978-12-30 15:46:23.987654", 11, 26)
}
+
+func TestFormatBinaryTime(t *testing.T) {
+       expect := func(expected string, src []byte, outlen uint8) {
+               actual, _ := formatBinaryTime(src, outlen)
+               bytes, ok := actual.([]byte)
+               if !ok {
+                       t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
+               }
+               if string(bytes) != expected {
+                       t.Errorf(
+                               "expected %q, got %q for src=%q and outlen=%d",
+                               expected, actual, src, outlen)
+               }
+       }
+
+       // binary format:
+       // sign (0: positive, 1: negative), days(4), hours, minutes, seconds, micro(4)
+
+       // Zeros
+       expect("00:00:00", []byte{}, 8)
+       expect("00:00:00.0", []byte{}, 10)
+       expect("00:00:00.000000", []byte{}, 15)
+
+       // Without micro(4)
+       expect("12:34:56", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 8)
+       expect("-12:34:56", []byte{1, 0, 0, 0, 0, 12, 34, 56}, 8)
+       expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 11)
+       expect("24:34:56", []byte{0, 1, 0, 0, 0, 0, 34, 56}, 8)
+       expect("-99:34:56", []byte{1, 4, 0, 0, 0, 3, 34, 56}, 8)
+       expect("103079215103:34:56", []byte{0, 255, 255, 255, 255, 23, 34, 56}, 8)
+
+       // With micro(4)
+       expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 11)
+       expect("12:34:56.000099", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 15)
+}
+
+func TestEscapeBackslash(t *testing.T) {
+       expect := func(expected, value string) {
+               actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
+               if actual != expected {
+                       t.Errorf(
+                               "expected %s, got %s",
+                               expected, actual,
+                       )
+               }
+
+               actual = string(escapeStringBackslash([]byte{}, value))
+               if actual != expected {
+                       t.Errorf(
+                               "expected %s, got %s",
+                               expected, actual,
+                       )
+               }
+       }
+
+       expect("foo\\0bar", "foo\x00bar")
+       expect("foo\\nbar", "foo\nbar")
+       expect("foo\\rbar", "foo\rbar")
+       expect("foo\\Zbar", "foo\x1abar")
+       expect("foo\\\"bar", "foo\"bar")
+       expect("foo\\\\bar", "foo\\bar")
+       expect("foo\\'bar", "foo'bar")
+}
+
+func TestEscapeQuotes(t *testing.T) {
+       expect := func(expected, value string) {
+               actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
+               if actual != expected {
+                       t.Errorf(
+                               "expected %s, got %s",
+                               expected, actual,
+                       )
+               }
+
+               actual = string(escapeStringQuotes([]byte{}, value))
+               if actual != expected {
+                       t.Errorf(
+                               "expected %s, got %s",
+                               expected, actual,
+                       )
+               }
+       }
+
+       expect("foo\x00bar", "foo\x00bar") // not affected
+       expect("foo\nbar", "foo\nbar")     // not affected
+       expect("foo\rbar", "foo\rbar")     // not affected
+       expect("foo\x1abar", "foo\x1abar") // not affected
+       expect("foo''bar", "foo'bar")      // affected
+       expect("foo\"bar", "foo\"bar")     // not affected
+}
+
// TestAtomicBool walks atomicBool through every transition: Set to both
// values (including redundant sets), and TrySet in both the "changed" and
// "unchanged" directions. It peeks at the raw .value field to pin the
// 0/1 encoding.
func TestAtomicBool(t *testing.T) {
	var ab atomicBool
	// Zero value must read as false.
	if ab.IsSet() {
		t.Fatal("Expected value to be false")
	}

	ab.Set(true)
	if ab.value != 1 {
		t.Fatal("Set(true) did not set value to 1")
	}
	if !ab.IsSet() {
		t.Fatal("Expected value to be true")
	}

	// Redundant Set(true) keeps the value true.
	ab.Set(true)
	if !ab.IsSet() {
		t.Fatal("Expected value to be true")
	}

	ab.Set(false)
	if ab.value != 0 {
		t.Fatal("Set(false) did not set value to 0")
	}
	if ab.IsSet() {
		t.Fatal("Expected value to be false")
	}

	// Redundant Set(false) keeps the value false.
	ab.Set(false)
	if ab.IsSet() {
		t.Fatal("Expected value to be false")
	}
	// TrySet reports true only when the stored value actually changed.
	if ab.TrySet(false) {
		t.Fatal("Expected TrySet(false) to fail")
	}
	if !ab.TrySet(true) {
		t.Fatal("Expected TrySet(true) to succeed")
	}
	if !ab.IsSet() {
		t.Fatal("Expected value to be true")
	}

	ab.Set(true)
	if !ab.IsSet() {
		t.Fatal("Expected value to be true")
	}
	if ab.TrySet(true) {
		t.Fatal("Expected TrySet(true) to fail")
	}
	if !ab.TrySet(false) {
		t.Fatal("Expected TrySet(false) to succeed")
	}
	if ab.IsSet() {
		t.Fatal("Expected value to be false")
	}

	ab._noCopy.Lock() // we've "tested" it ¯\_(ツ)_/¯
}
+
+func TestAtomicError(t *testing.T) {
+       var ae atomicError
+       if ae.Value() != nil {
+               t.Fatal("Expected value to be nil")
+       }
+
+       ae.Set(ErrMalformPkt)
+       if v := ae.Value(); v != ErrMalformPkt {
+               if v == nil {
+                       t.Fatal("Value is still nil")
+               }
+               t.Fatal("Error did not match")
+       }
+       ae.Set(ErrPktSync)
+       if ae.Value() == ErrMalformPkt {
+               t.Fatal("Error still matches old error")
+       }
+       if v := ae.Value(); v != ErrPktSync {
+               t.Fatal("Error did not match")
+       }
+}
+
// TestIsolationLevelMapping checks mapIsolationLevel for each of the four
// supported database/sql isolation levels, and that an unsupported level
// (LevelLinearizable) yields the documented error string.
func TestIsolationLevelMapping(t *testing.T) {
	data := []struct {
		level    driver.IsolationLevel
		expected string
	}{
		{
			level:    driver.IsolationLevel(sql.LevelReadCommitted),
			expected: "READ COMMITTED",
		},
		{
			level:    driver.IsolationLevel(sql.LevelRepeatableRead),
			expected: "REPEATABLE READ",
		},
		{
			level:    driver.IsolationLevel(sql.LevelReadUncommitted),
			expected: "READ UNCOMMITTED",
		},
		{
			level:    driver.IsolationLevel(sql.LevelSerializable),
			expected: "SERIALIZABLE",
		},
	}

	for i, td := range data {
		if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
			t.Fatal(i, td.expected, actual, err)
		}
	}

	// check unsupported mapping
	expectedErr := "mysql: unsupported isolation level: 7"
	actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable))
	if actual != "" || err == nil {
		t.Fatal("Expected error on unsupported isolation level")
	}
	if err.Error() != expectedErr {
		t.Fatalf("Expected error to be %q, got %q", expectedErr, err)
	}
}
diff --git a/vendor/github.com/jinzhu/gorm/.codeclimate.yml b/vendor/github.com/jinzhu/gorm/.codeclimate.yml
new file mode 100755 (executable)
index 0000000..51aba50
--- /dev/null
@@ -0,0 +1,11 @@
+---
+engines:
+  gofmt:
+    enabled: true
+  govet:
+    enabled: true
+  golint:
+    enabled: true
+ratings:
+  paths:
+  - "**.go"
diff --git a/vendor/github.com/jinzhu/gorm/.github/ISSUE_TEMPLATE.md b/vendor/github.com/jinzhu/gorm/.github/ISSUE_TEMPLATE.md
new file mode 100755 (executable)
index 0000000..a0b64bf
--- /dev/null
@@ -0,0 +1,45 @@
+Your issue may already be reported! Please search on the [issue track](https://github.com/jinzhu/gorm/issues) before creating one.
+
+### What version of Go are you using (`go version`)?
+
+
+### Which database and its version are you using?
+
+
+### Please provide a complete runnable program to reproduce your issue. **IMPORTANT**
+
+It needs to be runnable with [GORM's docker compose config](https://github.com/jinzhu/gorm/blob/master/docker-compose.yml), or please provide your own config.
+
+```go
+package main
+
+import (
+       "fmt"
+
+       "github.com/jinzhu/gorm"
+       _ "github.com/jinzhu/gorm/dialects/mssql"
+       _ "github.com/jinzhu/gorm/dialects/mysql"
+       _ "github.com/jinzhu/gorm/dialects/postgres"
+       _ "github.com/jinzhu/gorm/dialects/sqlite"
+)
+
+var db *gorm.DB
+
+func init() {
+       var err error
+       db, err = gorm.Open("sqlite3", "test.db")
+       // db, err = gorm.Open("postgres", "user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable")
+       // db, err = gorm.Open("mysql", "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True")
+       // db, err = gorm.Open("mssql", "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm")
+       if err != nil {
+               panic(err)
+       }
+       db.LogMode(true)
+}
+
+func main() {
+       if /* failure condition */ {
+               fmt.Println("failed")
+       } else {
+               fmt.Println("success")
+       }
+}
+```
diff --git a/vendor/github.com/jinzhu/gorm/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/jinzhu/gorm/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100755 (executable)
index 0000000..b467b6c
--- /dev/null
@@ -0,0 +1,9 @@
+Make sure these boxes checked before submitting your pull request.
+
+- [ ] Do only one thing
+- [ ] No API-breaking changes
+- [ ] New code/logic commented & tested
+
+For significant changes like big bug fixes, new features, please open an issue to make an agreement on an implementation design/plan first before starting it.
+
+### What did this pull request do?
diff --git a/vendor/github.com/jinzhu/gorm/.gitignore b/vendor/github.com/jinzhu/gorm/.gitignore
new file mode 100755 (executable)
index 0000000..01dc5ce
--- /dev/null
@@ -0,0 +1,2 @@
+documents
+_book
diff --git a/vendor/github.com/jinzhu/gorm/License b/vendor/github.com/jinzhu/gorm/License
new file mode 100755 (executable)
index 0000000..037e165
--- /dev/null
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-NOW  Jinzhu <wosmvp@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/gorm/README.md b/vendor/github.com/jinzhu/gorm/README.md
new file mode 100755 (executable)
index 0000000..0c5c7ea
--- /dev/null
@@ -0,0 +1,40 @@
+# GORM
+
+The fantastic ORM library for Golang, aims to be developer friendly.
+
+[![go report card](https://goreportcard.com/badge/github.com/jinzhu/gorm "go report card")](https://goreportcard.com/report/github.com/jinzhu/gorm)
+[![wercker status](https://app.wercker.com/status/8596cace912c9947dd9c8542ecc8cb8b/s/master "wercker status")](https://app.wercker.com/project/byKey/8596cace912c9947dd9c8542ecc8cb8b)
+[![Join the chat at https://gitter.im/jinzhu/gorm](https://img.shields.io/gitter/room/jinzhu/gorm.svg)](https://gitter.im/jinzhu/gorm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Open Collective Backer](https://opencollective.com/gorm/tiers/backer/badge.svg?label=backer&color=brightgreen "Open Collective Backer")](https://opencollective.com/gorm)
+[![Open Collective Sponsor](https://opencollective.com/gorm/tiers/sponsor/badge.svg?label=sponsor&color=brightgreen "Open Collective Sponsor")](https://opencollective.com/gorm)
+[![MIT license](http://img.shields.io/badge/license-MIT-brightgreen.svg)](http://opensource.org/licenses/MIT)
+[![GoDoc](https://godoc.org/github.com/jinzhu/gorm?status.svg)](https://godoc.org/github.com/jinzhu/gorm)
+
+## Overview
+
+* Full-Featured ORM (almost)
+* Associations (Has One, Has Many, Belongs To, Many To Many, Polymorphism)
+* Hooks (Before/After Create/Save/Update/Delete/Find)
+* Preloading (eager loading)
+* Transactions
+* Composite Primary Key
+* SQL Builder
+* Auto Migrations
+* Logger
+* Extendable, write Plugins based on GORM callbacks
+* Every feature comes with tests
+* Developer Friendly
+
+## Getting Started
+
+* GORM Guides [http://gorm.io](http://gorm.io)
+
+## Contributing
+
+[You can help to deliver a better GORM, check out things you can do](http://gorm.io/contribute.html)
+
+## License
+
+© Jinzhu, 2013~time.Now
+
+Released under the [MIT License](https://github.com/jinzhu/gorm/blob/master/License)
diff --git a/vendor/github.com/jinzhu/gorm/association.go b/vendor/github.com/jinzhu/gorm/association.go
new file mode 100755 (executable)
index 0000000..a73344f
--- /dev/null
@@ -0,0 +1,377 @@
+package gorm
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+)
+
// Association Mode contains some helper methods to handle relationship things easily.
type Association struct {
	Error  error   // first error hit by any association operation; once set, later calls short-circuit
	scope  *Scope  // scope wrapping the source (owner) value
	column string  // name of the association field on the source struct
	field  *Field  // resolved field for column, carrying the Relationship metadata
}
+
+// Find find out all related associations
+func (association *Association) Find(value interface{}) *Association {
+       association.scope.related(value, association.column)
+       return association.setErr(association.scope.db.Error)
+}
+
+// Append append new associations for many2many, has_many, replace current association for has_one, belongs_to
+func (association *Association) Append(values ...interface{}) *Association {
+       if association.Error != nil {
+               return association
+       }
+
+       if relationship := association.field.Relationship; relationship.Kind == "has_one" {
+               return association.Replace(values...)
+       }
+       return association.saveAssociations(values...)
+}
+
// Replace replaces the current associations with the passed values: new values
// are saved and linked first, then any previously-linked records not present in
// values are unlinked (foreign keys set to NULL) or, for many2many, removed
// from the join table. The unlinked records themselves are not deleted.
func (association *Association) Replace(values ...interface{}) *Association {
	if association.Error != nil {
		return association
	}

	var (
		relationship = association.field.Relationship
		scope        = association.scope
		field        = association.field.Field
		newDB        = scope.NewDB()
	)

	// Append new values
	association.field.Set(reflect.Zero(association.field.Field.Type()))
	association.saveAssociations(values...)

	// Belongs To
	if relationship.Kind == "belongs_to" {
		// Set foreign key to be null when clearing value (length equals 0)
		if len(values) == 0 {
			// Set foreign key to be nil
			var foreignKeyMap = map[string]interface{}{}
			for _, foreignKey := range relationship.ForeignDBNames {
				foreignKeyMap[foreignKey] = nil
			}
			association.setErr(newDB.Model(scope.Value).UpdateColumn(foreignKeyMap).Error)
		}
	} else {
		// Polymorphic Relations: restrict the cleanup query to rows owned by
		// this polymorphic type.
		if relationship.PolymorphicDBName != "" {
			newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue)
		}

		// Delete Relations except new created: build a NOT IN clause over the
		// primary keys of the records that were just saved, so they survive.
		if len(values) > 0 {
			var associationForeignFieldNames, associationForeignDBNames []string
			if relationship.Kind == "many_to_many" {
				// if many to many relations, get association fields name from association foreign keys
				associationScope := scope.New(reflect.New(field.Type()).Interface())
				for idx, dbName := range relationship.AssociationForeignFieldNames {
					if field, ok := associationScope.FieldByName(dbName); ok {
						associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
						associationForeignDBNames = append(associationForeignDBNames, relationship.AssociationForeignDBNames[idx])
					}
				}
			} else {
				// If has one/many relations, use primary keys
				for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
					associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
					associationForeignDBNames = append(associationForeignDBNames, field.DBName)
				}
			}

			newPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, field.Interface())

			if len(newPrimaryKeys) > 0 {
				sql := fmt.Sprintf("%v NOT IN (%v)", toQueryCondition(scope, associationForeignDBNames), toQueryMarks(newPrimaryKeys))
				newDB = newDB.Where(sql, toQueryValues(newPrimaryKeys)...)
			}
		}

		if relationship.Kind == "many_to_many" {
			// if many to many relations, delete related relations from join table
			var sourceForeignFieldNames []string

			for _, dbName := range relationship.ForeignFieldNames {
				if field, ok := scope.FieldByName(dbName); ok {
					sourceForeignFieldNames = append(sourceForeignFieldNames, field.Name)
				}
			}

			if sourcePrimaryKeys := scope.getColumnAsArray(sourceForeignFieldNames, scope.Value); len(sourcePrimaryKeys) > 0 {
				newDB = newDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(sourcePrimaryKeys)), toQueryValues(sourcePrimaryKeys)...)

				association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB))
			}
		} else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
			// has_one or has_many relations, set foreign key to be nil (TODO or delete them?)
			var foreignKeyMap = map[string]interface{}{}
			for idx, foreignKey := range relationship.ForeignDBNames {
				foreignKeyMap[foreignKey] = nil
				if field, ok := scope.FieldByName(relationship.AssociationForeignFieldNames[idx]); ok {
					newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
				}
			}

			fieldValue := reflect.New(association.field.Field.Type()).Interface()
			association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
		}
	}
	return association
}
+
// Delete remove relationship between source & passed arguments, but won't delete those arguments.
// For many2many it deletes join-table rows; for belongs_to / has_one / has_many it nulls the
// relevant foreign keys. Matching records are also removed from the in-memory source field.
func (association *Association) Delete(values ...interface{}) *Association {
	if association.Error != nil {
		return association
	}

	var (
		relationship = association.field.Relationship
		scope        = association.scope
		field        = association.field.Field
		newDB        = scope.NewDB()
	)

	// Nothing to unlink.
	if len(values) == 0 {
		return association
	}

	// Collect the associated model's primary key field/column names; used both
	// to build the delete query and to prune the in-memory field afterwards.
	var deletingResourcePrimaryFieldNames, deletingResourcePrimaryDBNames []string
	for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
		deletingResourcePrimaryFieldNames = append(deletingResourcePrimaryFieldNames, field.Name)
		deletingResourcePrimaryDBNames = append(deletingResourcePrimaryDBNames, field.DBName)
	}

	deletingPrimaryKeys := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, values...)

	if relationship.Kind == "many_to_many" {
		// source value's foreign keys
		for idx, foreignKey := range relationship.ForeignDBNames {
			if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok {
				newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
			}
		}

		// get association's foreign fields name
		var associationScope = scope.New(reflect.New(field.Type()).Interface())
		var associationForeignFieldNames []string
		for _, associationDBName := range relationship.AssociationForeignFieldNames {
			if field, ok := associationScope.FieldByName(associationDBName); ok {
				associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
			}
		}

		// association value's foreign keys
		// NOTE: intentionally shadows the outer deletingPrimaryKeys — here the
		// keys come from the association's foreign fields, not its primary fields.
		deletingPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, values...)
		sql := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(deletingPrimaryKeys))
		newDB = newDB.Where(sql, toQueryValues(deletingPrimaryKeys)...)

		association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB))
	} else {
		var foreignKeyMap = map[string]interface{}{}
		for _, foreignKey := range relationship.ForeignDBNames {
			foreignKeyMap[foreignKey] = nil
		}

		if relationship.Kind == "belongs_to" {
			// find with deleting relation's foreign keys
			primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, values...)
			newDB = newDB.Where(
				fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
				toQueryValues(primaryKeys)...,
			)

			// set foreign key to be null if there are some records affected
			modelValue := reflect.New(scope.GetModelStruct().ModelType).Interface()
			if results := newDB.Model(modelValue).UpdateColumn(foreignKeyMap); results.Error == nil {
				if results.RowsAffected > 0 {
					scope.updatedAttrsWithValues(foreignKeyMap)
				}
			} else {
				association.setErr(results.Error)
			}
		} else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
			// find all relations
			primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
			newDB = newDB.Where(
				fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
				toQueryValues(primaryKeys)...,
			)

			// only include those deleting relations
			newDB = newDB.Where(
				fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, deletingResourcePrimaryDBNames), toQueryMarks(deletingPrimaryKeys)),
				toQueryValues(deletingPrimaryKeys)...,
			)

			// set matched relation's foreign key to be null
			fieldValue := reflect.New(association.field.Field.Type()).Interface()
			association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
		}
	}

	// Remove deleted records from source's field
	if association.Error == nil {
		if field.Kind() == reflect.Slice {
			// Keep only the elements whose primary key is NOT among the deleted ones.
			leftValues := reflect.Zero(field.Type())

			for i := 0; i < field.Len(); i++ {
				reflectValue := field.Index(i)
				primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, reflectValue.Interface())[0]
				var isDeleted = false
				for _, pk := range deletingPrimaryKeys {
					if equalAsString(primaryKey, pk) {
						isDeleted = true
						break
					}
				}
				if !isDeleted {
					leftValues = reflect.Append(leftValues, reflectValue)
				}
			}

			association.field.Set(leftValues)
		} else if field.Kind() == reflect.Struct {
			// Single-valued relation: zero the field if it was one of the deleted records.
			primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, field.Interface())[0]
			for _, pk := range deletingPrimaryKeys {
				if equalAsString(primaryKey, pk) {
					association.field.Set(reflect.Zero(field.Type()))
					break
				}
			}
		}
	}

	return association
}
+
// Clear remove relationship between source & current associations, won't delete those associations.
// It is equivalent to calling Replace with no values.
func (association *Association) Clear() *Association {
	return association.Replace()
}
+
// Count return the count of current associations. On query failure the error is
// stored in association.Error and 0 is returned.
func (association *Association) Count() int {
	var (
		count        = 0
		relationship = association.field.Relationship
		scope        = association.scope
		fieldValue   = association.field.Field.Interface()
		query        = scope.DB()
	)

	// Build the WHERE clause according to where the foreign key lives for each
	// relation kind.
	switch relationship.Kind {
	case "many_to_many":
		// Join through the join table handled by the relationship's handler.
		query = relationship.JoinTableHandler.JoinWith(relationship.JoinTableHandler, query, scope.Value)
	case "has_many", "has_one":
		// Foreign key lives on the associated record; match it against the
		// source's referenced field values.
		primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
		query = query.Where(
			fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
			toQueryValues(primaryKeys)...,
		)
	case "belongs_to":
		// Foreign key lives on the source; match the association's referenced columns.
		primaryKeys := scope.getColumnAsArray(relationship.ForeignFieldNames, scope.Value)
		query = query.Where(
			fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(primaryKeys)),
			toQueryValues(primaryKeys)...,
		)
	}

	// Polymorphic relations additionally filter on the owner-type column.
	if relationship.PolymorphicType != "" {
		query = query.Where(
			fmt.Sprintf("%v.%v = ?", scope.New(fieldValue).QuotedTableName(), scope.Quote(relationship.PolymorphicDBName)),
			relationship.PolymorphicValue,
		)
	}

	if err := query.Model(fieldValue).Count(&count).Error; err != nil {
		association.Error = err
	}
	return count
}
+
// saveAssociations save passed values as associations. Each value may be a
// struct, a pointer to struct, or a slice of either; anything else records an
// "invalid value type" error. Saved values are assigned into the association
// field and, for many2many, registered in the join table.
func (association *Association) saveAssociations(values ...interface{}) *Association {
	var (
		scope        = association.scope
		field        = association.field
		relationship = field.Relationship
	)

	saveAssociation := func(reflectValue reflect.Value) {
		// value has to be a pointer so that saves can write generated keys back
		if reflectValue.Kind() != reflect.Ptr {
			reflectPtr := reflect.New(reflectValue.Type())
			reflectPtr.Elem().Set(reflectValue)
			reflectValue = reflectPtr
		}

		// value has to be saved for many2many before the join row can reference it
		if relationship.Kind == "many_to_many" {
			if scope.New(reflectValue.Interface()).PrimaryKeyZero() {
				association.setErr(scope.NewDB().Save(reflectValue.Interface()).Error)
			}
		}

		// Assign Fields: pick the assignment shape (pointer vs value, scalar vs
		// slice element) that matches the association field's type.
		var fieldType = field.Field.Type()
		var setFieldBackToValue, setSliceFieldBackToValue bool
		if reflectValue.Type().AssignableTo(fieldType) {
			field.Set(reflectValue)
		} else if reflectValue.Type().Elem().AssignableTo(fieldType) {
			// if field's type is struct, then need to set value back to argument after save
			setFieldBackToValue = true
			field.Set(reflectValue.Elem())
		} else if fieldType.Kind() == reflect.Slice {
			if reflectValue.Type().AssignableTo(fieldType.Elem()) {
				field.Set(reflect.Append(field.Field, reflectValue))
			} else if reflectValue.Type().Elem().AssignableTo(fieldType.Elem()) {
				// if field's type is slice of struct, then need to set value back to argument after save
				setSliceFieldBackToValue = true
				field.Set(reflect.Append(field.Field, reflectValue.Elem()))
			}
		}

		if relationship.Kind == "many_to_many" {
			// Register the pairing in the join table.
			association.setErr(relationship.JoinTableHandler.Add(relationship.JoinTableHandler, scope.NewDB(), scope.Value, reflectValue.Interface()))
		} else {
			// Save the source with only this association selected, letting gorm
			// cascade-save the related record and fill in foreign keys.
			association.setErr(scope.NewDB().Select(field.Name).Save(scope.Value).Error)

			// Propagate generated IDs/foreign keys back into the caller's argument.
			if setFieldBackToValue {
				reflectValue.Elem().Set(field.Field)
			} else if setSliceFieldBackToValue {
				reflectValue.Elem().Set(field.Field.Index(field.Field.Len() - 1))
			}
		}
	}

	for _, value := range values {
		reflectValue := reflect.ValueOf(value)
		indirectReflectValue := reflect.Indirect(reflectValue)
		if indirectReflectValue.Kind() == reflect.Struct {
			saveAssociation(reflectValue)
		} else if indirectReflectValue.Kind() == reflect.Slice {
			for i := 0; i < indirectReflectValue.Len(); i++ {
				saveAssociation(indirectReflectValue.Index(i))
			}
		} else {
			association.setErr(errors.New("invalid value type"))
		}
	}
	return association
}
+
+// setErr set error when the error is not nil. And return Association.
+func (association *Association) setErr(err error) *Association {
+       if err != nil {
+               association.Error = err
+       }
+       return association
+}
diff --git a/vendor/github.com/jinzhu/gorm/association_test.go b/vendor/github.com/jinzhu/gorm/association_test.go
new file mode 100755 (executable)
index 0000000..60d0cf4
--- /dev/null
@@ -0,0 +1,1050 @@
+package gorm_test
+
+import (
+       "fmt"
+       "os"
+       "reflect"
+       "sort"
+       "testing"
+
+       "github.com/jinzhu/gorm"
+)
+
// TestBelongsTo exercises the full Association-mode lifecycle for belongs_to
// relations on the package-level Post/Category fixtures: foreign-key assignment
// on Save, then Find/Related, Count, Append, Replace, Delete, Clear, and
// finally Count/Find behavior when the associated record is soft-deleted
// (including Unscoped queries).
func TestBelongsTo(t *testing.T) {
	post := Post{
		Title:        "post belongs to",
		Body:         "body belongs to",
		Category:     Category{Name: "Category 1"},
		MainCategory: Category{Name: "Main Category 1"},
	}

	if err := DB.Save(&post).Error; err != nil {
		t.Error("Got errors when save post", err)
	}

	// Save should cascade-create both categories and fill in their primary keys
	// plus the post's foreign key columns.
	if post.Category.ID == 0 || post.MainCategory.ID == 0 {
		t.Errorf("Category's primary key should be updated")
	}

	if post.CategoryId.Int64 == 0 || post.MainCategoryId == 0 {
		t.Errorf("post's foreign key should be updated")
	}

	// Query
	var category1 Category
	DB.Model(&post).Association("Category").Find(&category1)
	if category1.Name != "Category 1" {
		t.Errorf("Query belongs to relations with Association")
	}

	var mainCategory1 Category
	DB.Model(&post).Association("MainCategory").Find(&mainCategory1)
	if mainCategory1.Name != "Main Category 1" {
		t.Errorf("Query belongs to relations with Association")
	}

	var category11 Category
	DB.Model(&post).Related(&category11)
	if category11.Name != "Category 1" {
		t.Errorf("Query belongs to relations with Related")
	}

	if DB.Model(&post).Association("Category").Count() != 1 {
		t.Errorf("Post's category count should be 1")
	}

	if DB.Model(&post).Association("MainCategory").Count() != 1 {
		t.Errorf("Post's main category count should be 1")
	}

	// Append — for belongs_to this replaces the current association.
	var category2 = Category{
		Name: "Category 2",
	}
	DB.Model(&post).Association("Category").Append(&category2)

	if category2.ID == 0 {
		t.Errorf("Category should has ID when created with Append")
	}

	var category21 Category
	DB.Model(&post).Related(&category21)

	if category21.Name != "Category 2" {
		t.Errorf("Category should be updated with Append")
	}

	if DB.Model(&post).Association("Category").Count() != 1 {
		t.Errorf("Post's category count should be 1")
	}

	// Replace
	var category3 = Category{
		Name: "Category 3",
	}
	DB.Model(&post).Association("Category").Replace(&category3)

	if category3.ID == 0 {
		t.Errorf("Category should has ID when created with Replace")
	}

	var category31 Category
	DB.Model(&post).Related(&category31)
	if category31.Name != "Category 3" {
		t.Errorf("Category should be updated with Replace")
	}

	if DB.Model(&post).Association("Category").Count() != 1 {
		t.Errorf("Post's category count should be 1")
	}

	// Delete — deleting an unrelated category must be a no-op.
	DB.Model(&post).Association("Category").Delete(&category2)
	if DB.Model(&post).Related(&Category{}).RecordNotFound() {
		t.Errorf("Should not delete any category when Delete a unrelated Category")
	}

	if post.Category.Name == "" {
		t.Errorf("Post's category should not be reseted when Delete a unrelated Category")
	}

	DB.Model(&post).Association("Category").Delete(&category3)

	if post.Category.Name != "" {
		t.Errorf("Post's category should be reseted after Delete")
	}

	var category41 Category
	DB.Model(&post).Related(&category41)
	if category41.Name != "" {
		t.Errorf("Category should be deleted with Delete")
	}

	if count := DB.Model(&post).Association("Category").Count(); count != 0 {
		t.Errorf("Post's category count should be 0 after Delete, but got %v", count)
	}

	// Clear
	DB.Model(&post).Association("Category").Append(&Category{
		Name: "Category 2",
	})

	if DB.Model(&post).Related(&Category{}).RecordNotFound() {
		t.Errorf("Should find category after append")
	}

	if post.Category.Name == "" {
		t.Errorf("Post's category should has value after Append")
	}

	DB.Model(&post).Association("Category").Clear()

	if post.Category.Name != "" {
		t.Errorf("Post's category should be cleared after Clear")
	}

	if !DB.Model(&post).Related(&Category{}).RecordNotFound() {
		t.Errorf("Should not find any category after Clear")
	}

	if count := DB.Model(&post).Association("Category").Count(); count != 0 {
		t.Errorf("Post's category count should be 0 after Clear, but got %v", count)
	}

	// Check Association mode with soft delete
	category6 := Category{
		Name: "Category 6",
	}
	DB.Model(&post).Association("Category").Append(&category6)

	if count := DB.Model(&post).Association("Category").Count(); count != 1 {
		t.Errorf("Post's category count should be 1 after Append, but got %v", count)
	}

	DB.Delete(&category6)

	// Soft-deleted categories must be invisible to scoped queries...
	if count := DB.Model(&post).Association("Category").Count(); count != 0 {
		t.Errorf("Post's category count should be 0 after the category has been deleted, but got %v", count)
	}

	if err := DB.Model(&post).Association("Category").Find(&Category{}).Error; err == nil {
		t.Errorf("Post's category is not findable after Delete")
	}

	// ...but visible again with Unscoped.
	if count := DB.Unscoped().Model(&post).Association("Category").Count(); count != 1 {
		t.Errorf("Post's category count should be 1 when query with Unscoped, but got %v", count)
	}

	if err := DB.Unscoped().Model(&post).Association("Category").Find(&Category{}).Error; err != nil {
		t.Errorf("Post's category should be findable when query with Unscoped, got %v", err)
	}
}
+
+func TestBelongsToOverrideForeignKey1(t *testing.T) {
+       type Profile struct {
+               gorm.Model
+               Name string
+       }
+
+       type User struct {
+               gorm.Model
+               Profile      Profile `gorm:"ForeignKey:ProfileRefer"`
+               ProfileRefer int
+       }
+
+       if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
+               if relation.Relationship.Kind != "belongs_to" ||
+                       !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileRefer"}) ||
+                       !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) {
+                       t.Errorf("Override belongs to foreign key with tag")
+               }
+       }
+}
+
+func TestBelongsToOverrideForeignKey2(t *testing.T) {
+       type Profile struct {
+               gorm.Model
+               Refer string
+               Name  string
+       }
+
+       type User struct {
+               gorm.Model
+               Profile   Profile `gorm:"ForeignKey:ProfileID;AssociationForeignKey:Refer"`
+               ProfileID int
+       }
+
+       if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
+               if relation.Relationship.Kind != "belongs_to" ||
+                       !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileID"}) ||
+                       !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) {
+                       t.Errorf("Override belongs to foreign key with tag")
+               }
+       }
+}
+
// TestHasOne exercises the full Association-mode lifecycle for has_one
// relations on the package-level User/CreditCard fixtures: foreign-key
// assignment on Save, then Related/Find, Count, Append, Replace, Delete,
// Clear, and soft-delete visibility (including Unscoped queries).
func TestHasOne(t *testing.T) {
	user := User{
		Name:       "has one",
		CreditCard: CreditCard{Number: "411111111111"},
	}

	if err := DB.Save(&user).Error; err != nil {
		t.Error("Got errors when save user", err.Error())
	}

	// Save should cascade-create the credit card and set its user foreign key.
	if user.CreditCard.UserId.Int64 == 0 {
		t.Errorf("CreditCard's foreign key should be updated")
	}

	// Query
	var creditCard1 CreditCard
	DB.Model(&user).Related(&creditCard1)

	if creditCard1.Number != "411111111111" {
		t.Errorf("Query has one relations with Related")
	}

	var creditCard11 CreditCard
	DB.Model(&user).Association("CreditCard").Find(&creditCard11)

	if creditCard11.Number != "411111111111" {
		t.Errorf("Query has one relations with Related")
	}

	if DB.Model(&user).Association("CreditCard").Count() != 1 {
		t.Errorf("User's credit card count should be 1")
	}

	// Append — for has_one this replaces the current association.
	var creditcard2 = CreditCard{
		Number: "411111111112",
	}
	DB.Model(&user).Association("CreditCard").Append(&creditcard2)

	if creditcard2.ID == 0 {
		t.Errorf("Creditcard should has ID when created with Append")
	}

	var creditcard21 CreditCard
	DB.Model(&user).Related(&creditcard21)
	if creditcard21.Number != "411111111112" {
		t.Errorf("CreditCard should be updated with Append")
	}

	if DB.Model(&user).Association("CreditCard").Count() != 1 {
		t.Errorf("User's credit card count should be 1")
	}

	// Replace
	var creditcard3 = CreditCard{
		Number: "411111111113",
	}
	DB.Model(&user).Association("CreditCard").Replace(&creditcard3)

	if creditcard3.ID == 0 {
		t.Errorf("Creditcard should has ID when created with Replace")
	}

	var creditcard31 CreditCard
	DB.Model(&user).Related(&creditcard31)
	if creditcard31.Number != "411111111113" {
		t.Errorf("CreditCard should be updated with Replace")
	}

	if DB.Model(&user).Association("CreditCard").Count() != 1 {
		t.Errorf("User's credit card count should be 1")
	}

	// Delete — deleting an unrelated credit card must be a no-op.
	DB.Model(&user).Association("CreditCard").Delete(&creditcard2)
	var creditcard4 CreditCard
	DB.Model(&user).Related(&creditcard4)
	if creditcard4.Number != "411111111113" {
		t.Errorf("Should not delete credit card when Delete a unrelated CreditCard")
	}

	if DB.Model(&user).Association("CreditCard").Count() != 1 {
		t.Errorf("User's credit card count should be 1")
	}

	DB.Model(&user).Association("CreditCard").Delete(&creditcard3)
	if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
		t.Errorf("Should delete credit card with Delete")
	}

	if DB.Model(&user).Association("CreditCard").Count() != 0 {
		t.Errorf("User's credit card count should be 0 after Delete")
	}

	// Clear
	var creditcard5 = CreditCard{
		Number: "411111111115",
	}
	DB.Model(&user).Association("CreditCard").Append(&creditcard5)

	if DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
		t.Errorf("Should added credit card with Append")
	}

	if DB.Model(&user).Association("CreditCard").Count() != 1 {
		t.Errorf("User's credit card count should be 1")
	}

	DB.Model(&user).Association("CreditCard").Clear()
	if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
		t.Errorf("Credit card should be deleted with Clear")
	}

	if DB.Model(&user).Association("CreditCard").Count() != 0 {
		t.Errorf("User's credit card count should be 0 after Clear")
	}

	// Check Association mode with soft delete
	var creditcard6 = CreditCard{
		Number: "411111111116",
	}
	DB.Model(&user).Association("CreditCard").Append(&creditcard6)

	if count := DB.Model(&user).Association("CreditCard").Count(); count != 1 {
		t.Errorf("User's credit card count should be 1 after Append, but got %v", count)
	}

	DB.Delete(&creditcard6)

	// Soft-deleted credit cards must be invisible to scoped queries...
	if count := DB.Model(&user).Association("CreditCard").Count(); count != 0 {
		t.Errorf("User's credit card count should be 0 after credit card deleted, but got %v", count)
	}

	if err := DB.Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err == nil {
		t.Errorf("User's creditcard is not findable after Delete")
	}

	// ...but visible again with Unscoped.
	if count := DB.Unscoped().Model(&user).Association("CreditCard").Count(); count != 1 {
		t.Errorf("User's credit card count should be 1 when query with Unscoped, but got %v", count)
	}

	if err := DB.Unscoped().Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err != nil {
		t.Errorf("User's creditcard should be findable when query with Unscoped, got %v", err)
	}
}
+
+// TestHasOneOverrideForeignKey1 verifies that a `gorm:"ForeignKey:..."` tag
+// overrides the conventional foreign key of a has_one relationship, while the
+// association key stays the default primary key ("ID").
+func TestHasOneOverrideForeignKey1(t *testing.T) {
+       type Profile struct {
+               gorm.Model
+               Name      string
+               UserRefer uint
+       }
+
+       type User struct {
+               gorm.Model
+               Profile Profile `gorm:"ForeignKey:UserRefer"`
+       }
+
+       field, ok := DB.NewScope(&User{}).FieldByName("Profile")
+       if !ok {
+               return
+       }
+       rel := field.Relationship
+       matches := rel.Kind == "has_one" &&
+               reflect.DeepEqual(rel.ForeignFieldNames, []string{"UserRefer"}) &&
+               reflect.DeepEqual(rel.AssociationForeignFieldNames, []string{"ID"})
+       if !matches {
+               t.Errorf("Override belongs to foreign key with tag")
+       }
+}
+
+// TestHasOneOverrideForeignKey2 verifies that both sides of a has_one
+// relationship can be overridden by tag: ForeignKey picks the child column
+// and AssociationForeignKey picks the parent column ("Refer" instead of ID).
+func TestHasOneOverrideForeignKey2(t *testing.T) {
+       type Profile struct {
+               gorm.Model
+               Name   string
+               UserID uint
+       }
+
+       type User struct {
+               gorm.Model
+               Refer   string
+               Profile Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"`
+       }
+
+       field, ok := DB.NewScope(&User{}).FieldByName("Profile")
+       if !ok {
+               return
+       }
+       rel := field.Relationship
+       matches := rel.Kind == "has_one" &&
+               reflect.DeepEqual(rel.ForeignFieldNames, []string{"UserID"}) &&
+               reflect.DeepEqual(rel.AssociationForeignFieldNames, []string{"Refer"})
+       if !matches {
+               t.Errorf("Override belongs to foreign key with tag")
+       }
+}
+
+// TestHasMany exercises gorm's has-many association between Post and
+// Comment: saving the parent back-fills each child's PostId, then
+// Find/Related/Count, Append, Delete, Replace, Clear and soft-delete
+// visibility via Unscoped are checked in sequence. The stages are
+// order-dependent: each one asserts the state left by the previous stage.
+func TestHasMany(t *testing.T) {
+       post := Post{
+               Title:    "post has many",
+               Body:     "body has many",
+               Comments: []*Comment{{Content: "Comment 1"}, {Content: "Comment 2"}},
+       }
+
+       if err := DB.Save(&post).Error; err != nil {
+               t.Error("Got errors when save post", err)
+       }
+
+       // Saving the parent must have assigned the foreign key on each child.
+       for _, comment := range post.Comments {
+               if comment.PostId == 0 {
+                       t.Errorf("comment's PostID should be updated")
+               }
+       }
+
+       // compareComments reports whether comments carry exactly the given
+       // contents, ignoring order (both slices are sorted; contents is
+       // sorted in place).
+       var compareComments = func(comments []Comment, contents []string) bool {
+               var commentContents []string
+               for _, comment := range comments {
+                       commentContents = append(commentContents, comment.Content)
+               }
+               sort.Strings(commentContents)
+               sort.Strings(contents)
+               return reflect.DeepEqual(commentContents, contents)
+       }
+
+       // Query
+       if DB.First(&Comment{}, "content = ?", "Comment 1").Error != nil {
+               t.Errorf("Comment 1 should be saved")
+       }
+
+       var comments1 []Comment
+       DB.Model(&post).Association("Comments").Find(&comments1)
+       if !compareComments(comments1, []string{"Comment 1", "Comment 2"}) {
+               t.Errorf("Query has many relations with Association")
+       }
+
+       var comments11 []Comment
+       DB.Model(&post).Related(&comments11)
+       if !compareComments(comments11, []string{"Comment 1", "Comment 2"}) {
+               t.Errorf("Query has many relations with Related")
+       }
+
+       if DB.Model(&post).Association("Comments").Count() != 2 {
+               t.Errorf("Post's comments count should be 2")
+       }
+
+       // Append
+       DB.Model(&post).Association("Comments").Append(&Comment{Content: "Comment 3"})
+
+       var comments2 []Comment
+       DB.Model(&post).Related(&comments2)
+       if !compareComments(comments2, []string{"Comment 1", "Comment 2", "Comment 3"}) {
+               t.Errorf("Append new record to has many relations")
+       }
+
+       if DB.Model(&post).Association("Comments").Count() != 3 {
+               t.Errorf("Post's comments count should be 3 after Append")
+       }
+
+       // Delete — comments11 still holds "Comment 1" and "Comment 2".
+       DB.Model(&post).Association("Comments").Delete(comments11)
+
+       var comments3 []Comment
+       DB.Model(&post).Related(&comments3)
+       if !compareComments(comments3, []string{"Comment 3"}) {
+               t.Errorf("Delete an existing resource for has many relations")
+       }
+
+       if DB.Model(&post).Association("Comments").Count() != 1 {
+               t.Errorf("Post's comments count should be 1 after Delete 2")
+       }
+
+       // Replace on an unrelated post (Id 999) must not touch this post.
+       DB.Model(&Post{Id: 999}).Association("Comments").Replace()
+
+       var comments4 []Comment
+       DB.Model(&post).Related(&comments4)
+       if len(comments4) == 0 {
+               t.Errorf("Replace for other resource should not clear all comments")
+       }
+
+       DB.Model(&post).Association("Comments").Replace(&Comment{Content: "Comment 4"}, &Comment{Content: "Comment 5"})
+
+       var comments41 []Comment
+       DB.Model(&post).Related(&comments41)
+       if !compareComments(comments41, []string{"Comment 4", "Comment 5"}) {
+               t.Errorf("Replace has many relations")
+       }
+
+       // Clear on an unrelated post (Id 999) must not touch this post.
+       DB.Model(&Post{Id: 999}).Association("Comments").Clear()
+
+       var comments5 []Comment
+       DB.Model(&post).Related(&comments5)
+       if len(comments5) == 0 {
+               t.Errorf("Clear should not clear all comments")
+       }
+
+       DB.Model(&post).Association("Comments").Clear()
+
+       var comments51 []Comment
+       DB.Model(&post).Related(&comments51)
+       if len(comments51) != 0 {
+               t.Errorf("Clear has many relations")
+       }
+
+       // Check Association mode with soft delete: a soft-deleted comment is
+       // invisible to Count/Find unless the query chain is Unscoped.
+       var comment6 = Comment{
+               Content: "comment 6",
+       }
+       DB.Model(&post).Association("Comments").Append(&comment6)
+
+       if count := DB.Model(&post).Association("Comments").Count(); count != 1 {
+               t.Errorf("post's comments count should be 1 after Append, but got %v", count)
+       }
+
+       DB.Delete(&comment6)
+
+       if count := DB.Model(&post).Association("Comments").Count(); count != 0 {
+               t.Errorf("post's comments count should be 0 after comment been deleted, but got %v", count)
+       }
+
+       var comments6 []Comment
+       if DB.Model(&post).Association("Comments").Find(&comments6); len(comments6) != 0 {
+               t.Errorf("post's comments count should be 0 when find with Find, but got %v", len(comments6))
+       }
+
+       if count := DB.Unscoped().Model(&post).Association("Comments").Count(); count != 1 {
+               t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", count)
+       }
+
+       var comments61 []Comment
+       if DB.Unscoped().Model(&post).Association("Comments").Find(&comments61); len(comments61) != 1 {
+               t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", len(comments61))
+       }
+}
+
+// TestHasManyOverrideForeignKey1 verifies that a `gorm:"ForeignKey:..."` tag
+// overrides the conventional foreign key of a has_many relationship, while
+// the association key stays the default primary key ("ID").
+func TestHasManyOverrideForeignKey1(t *testing.T) {
+       type Profile struct {
+               gorm.Model
+               Name      string
+               UserRefer uint
+       }
+
+       type User struct {
+               gorm.Model
+               Profile []Profile `gorm:"ForeignKey:UserRefer"`
+       }
+
+       field, ok := DB.NewScope(&User{}).FieldByName("Profile")
+       if !ok {
+               return
+       }
+       rel := field.Relationship
+       matches := rel.Kind == "has_many" &&
+               reflect.DeepEqual(rel.ForeignFieldNames, []string{"UserRefer"}) &&
+               reflect.DeepEqual(rel.AssociationForeignFieldNames, []string{"ID"})
+       if !matches {
+               t.Errorf("Override belongs to foreign key with tag")
+       }
+}
+
+// TestHasManyOverrideForeignKey2 verifies that both sides of a has_many
+// relationship can be overridden by tag: ForeignKey picks the child column
+// and AssociationForeignKey picks the parent column ("Refer" instead of ID).
+func TestHasManyOverrideForeignKey2(t *testing.T) {
+       type Profile struct {
+               gorm.Model
+               Name   string
+               UserID uint
+       }
+
+       type User struct {
+               gorm.Model
+               Refer   string
+               Profile []Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"`
+       }
+
+       field, ok := DB.NewScope(&User{}).FieldByName("Profile")
+       if !ok {
+               return
+       }
+       rel := field.Relationship
+       matches := rel.Kind == "has_many" &&
+               reflect.DeepEqual(rel.ForeignFieldNames, []string{"UserID"}) &&
+               reflect.DeepEqual(rel.AssociationForeignFieldNames, []string{"Refer"})
+       if !matches {
+               t.Errorf("Override belongs to foreign key with tag")
+       }
+}
+
+// TestManyToMany exercises gorm's many2many association between User and
+// Language: query via Related/Association.Find/Count, Append of new and
+// existing records (single values and slices), Delete of join rows only,
+// Replace, Clear, and finally soft-delete visibility through Unscoped.
+// The stages are order-dependent: each asserts the state left by the last.
+func TestManyToMany(t *testing.T) {
+       // Bug fix: DB.Raw only records the SQL — it is not executed until the
+       // result is scanned (Scan/Row/Rows), so the cleanup never actually
+       // ran. Exec runs the statement immediately.
+       DB.Exec("delete from languages")
+       var languages = []Language{{Name: "ZH"}, {Name: "EN"}}
+       user := User{Name: "Many2Many", Languages: languages}
+       DB.Save(&user)
+
+       // Query
+       var newLanguages []Language
+       DB.Model(&user).Related(&newLanguages, "Languages")
+       if len(newLanguages) != len([]string{"ZH", "EN"}) {
+               t.Errorf("Query many to many relations")
+       }
+
+       DB.Model(&user).Association("Languages").Find(&newLanguages)
+       if len(newLanguages) != len([]string{"ZH", "EN"}) {
+               t.Errorf("Should be able to find many to many relations")
+       }
+
+       if DB.Model(&user).Association("Languages").Count() != len([]string{"ZH", "EN"}) {
+               t.Errorf("Count should return correct result")
+       }
+
+       // Append — unsaved values must be inserted, saved ones only linked.
+       DB.Model(&user).Association("Languages").Append(&Language{Name: "DE"})
+       if DB.Where("name = ?", "DE").First(&Language{}).RecordNotFound() {
+               t.Errorf("New record should be saved when append")
+       }
+
+       languageA := Language{Name: "AA"}
+       DB.Save(&languageA)
+       DB.Model(&User{Id: user.Id}).Association("Languages").Append(&languageA)
+
+       languageC := Language{Name: "CC"}
+       DB.Save(&languageC)
+       DB.Model(&user).Association("Languages").Append(&[]Language{{Name: "BB"}, languageC})
+
+       DB.Model(&User{Id: user.Id}).Association("Languages").Append(&[]Language{{Name: "DD"}, {Name: "EE"}})
+
+       totalLanguages := []string{"ZH", "EN", "DE", "AA", "BB", "CC", "DD", "EE"}
+
+       if DB.Model(&user).Association("Languages").Count() != len(totalLanguages) {
+               t.Errorf("All appended languages should be saved")
+       }
+
+       // Delete — removes join rows only; the Language record must survive.
+       user.Languages = []Language{}
+       DB.Model(&user).Association("Languages").Find(&user.Languages)
+
+       var language Language
+       DB.Where("name = ?", "EE").First(&language)
+       DB.Model(&user).Association("Languages").Delete(language, &language)
+
+       if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-1 || len(user.Languages) != len(totalLanguages)-1 {
+               t.Errorf("Relations should be deleted with Delete")
+       }
+       if DB.Where("name = ?", "EE").First(&Language{}).RecordNotFound() {
+               t.Errorf("Language EE should not be deleted")
+       }
+
+       // Deleting languages shared with user2 must only unlink them from user.
+       DB.Where("name IN (?)", []string{"CC", "DD"}).Find(&languages)
+
+       user2 := User{Name: "Many2Many_User2", Languages: languages}
+       DB.Save(&user2)
+
+       DB.Model(&user).Association("Languages").Delete(languages, &languages)
+       if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-3 || len(user.Languages) != len(totalLanguages)-3 {
+               t.Errorf("Relations should be deleted with Delete")
+       }
+
+       if DB.Model(&user2).Association("Languages").Count() == 0 {
+               t.Errorf("Other user's relations should not be deleted")
+       }
+
+       // Replace — with no arguments it clears the association entirely.
+       var languageB Language
+       DB.Where("name = ?", "BB").First(&languageB)
+       DB.Model(&user).Association("Languages").Replace(languageB)
+       if len(user.Languages) != 1 || DB.Model(&user).Association("Languages").Count() != 1 {
+               t.Errorf("Relations should be replaced")
+       }
+
+       DB.Model(&user).Association("Languages").Replace()
+       if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 {
+               t.Errorf("Relations should be replaced with empty")
+       }
+
+       DB.Model(&user).Association("Languages").Replace(&[]Language{{Name: "FF"}, {Name: "JJ"}})
+       if len(user.Languages) != 2 || DB.Model(&user).Association("Languages").Count() != len([]string{"FF", "JJ"}) {
+               t.Errorf("Relations should be replaced")
+       }
+
+       // Clear
+       DB.Model(&user).Association("Languages").Clear()
+       if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 {
+               t.Errorf("Relations should be cleared")
+       }
+
+       // Check Association mode with soft delete: a soft-deleted language is
+       // invisible to Count/Find unless the query chain is Unscoped.
+       var language6 = Language{
+               Name: "language 6",
+       }
+       DB.Model(&user).Association("Languages").Append(&language6)
+
+       if count := DB.Model(&user).Association("Languages").Count(); count != 1 {
+               t.Errorf("user's languages count should be 1 after Append, but got %v", count)
+       }
+
+       DB.Delete(&language6)
+
+       if count := DB.Model(&user).Association("Languages").Count(); count != 0 {
+               t.Errorf("user's languages count should be 0 after language been deleted, but got %v", count)
+       }
+
+       var languages6 []Language
+       if DB.Model(&user).Association("Languages").Find(&languages6); len(languages6) != 0 {
+               t.Errorf("user's languages count should be 0 when find with Find, but got %v", len(languages6))
+       }
+
+       if count := DB.Unscoped().Model(&user).Association("Languages").Count(); count != 1 {
+               t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", count)
+       }
+
+       var languages61 []Language
+       if DB.Unscoped().Model(&user).Association("Languages").Find(&languages61); len(languages61) != 1 {
+               t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", len(languages61))
+       }
+}
+
+// TestRelated saves a User with belongs-to (addresses, company), has-one
+// (credit card) and has-many (emails) associations, then resolves each of
+// them through Related — in both directions — including lookups that pass
+// an explicit foreign-key or association name.
+func TestRelated(t *testing.T) {
+       user := User{
+               Name:            "jinzhu",
+               BillingAddress:  Address{Address1: "Billing Address - Address 1"},
+               ShippingAddress: Address{Address1: "Shipping Address - Address 1"},
+               Emails:          []Email{{Email: "jinzhu@example.com"}, {Email: "jinzhu-2@example@example.com"}},
+               CreditCard:      CreditCard{Number: "1234567890"},
+               Company:         Company{Name: "company1"},
+       }
+
+       if err := DB.Save(&user).Error; err != nil {
+               t.Errorf("No error should happen when saving user")
+       }
+
+       if user.CreditCard.ID == 0 {
+               t.Errorf("After user save, credit card should have id")
+       }
+
+       if user.BillingAddress.ID == 0 {
+               t.Errorf("After user save, billing address should have id")
+       }
+
+       // Fixed copy-paste message: this asserts on the email id.
+       if user.Emails[0].Id == 0 {
+               t.Errorf("After user save, email should have id")
+       }
+
+       var emails []Email
+       DB.Model(&user).Related(&emails)
+       if len(emails) != 2 {
+               t.Errorf("Should have two emails")
+       }
+
+       // Related honours conditions set earlier in the chain.
+       var emails2 []Email
+       DB.Model(&user).Where("email = ?", "jinzhu@example.com").Related(&emails2)
+       if len(emails2) != 1 {
+               t.Errorf("Should have one email matching the condition")
+       }
+
+       var emails3 []*Email
+       DB.Model(&user).Related(&emails3)
+       if len(emails3) != 2 {
+               t.Errorf("Should have two emails")
+       }
+
+       var user1 User
+       DB.Model(&user).Related(&user1.Emails)
+       if len(user1.Emails) != 2 {
+               t.Errorf("Should have two emails")
+       }
+
+       var address1 Address
+       DB.Model(&user).Related(&address1, "BillingAddressId")
+       if address1.Address1 != "Billing Address - Address 1" {
+               t.Errorf("Should get billing address from user correctly")
+       }
+
+       user1 = User{}
+       DB.Model(&address1).Related(&user1, "BillingAddressId")
+       if DB.NewRecord(user1) {
+               t.Errorf("Should get user from address correctly")
+       }
+
+       var user2 User
+       DB.Model(&emails[0]).Related(&user2)
+       if user2.Id != user.Id || user2.Name != user.Name {
+               t.Errorf("Should get user from email correctly")
+       }
+
+       var creditcard CreditCard
+       var user3 User
+       DB.First(&creditcard, "number = ?", "1234567890")
+       DB.Model(&creditcard).Related(&user3)
+       if user3.Id != user.Id || user3.Name != user.Name {
+               t.Errorf("Should get user from credit card correctly")
+       }
+
+       // An unsaved source record must yield RecordNotFound.
+       if !DB.Model(&CreditCard{}).Related(&User{}).RecordNotFound() {
+               t.Errorf("RecordNotFound for Related")
+       }
+
+       var company Company
+       if DB.Model(&user).Related(&company, "Company").RecordNotFound() || company.Name != "company1" {
+               t.Errorf("RecordNotFound for Related")
+       }
+}
+
+func TestForeignKey(t *testing.T) {
+       for _, structField := range DB.NewScope(&User{}).GetStructFields() {
+               for _, foreignKey := range []string{"BillingAddressID", "ShippingAddressId", "CompanyID"} {
+                       if structField.Name == foreignKey && !structField.IsForeignKey {
+                               t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
+                       }
+               }
+       }
+
+       for _, structField := range DB.NewScope(&Email{}).GetStructFields() {
+               for _, foreignKey := range []string{"UserId"} {
+                       if structField.Name == foreignKey && !structField.IsForeignKey {
+                               t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
+                       }
+               }
+       }
+
+       for _, structField := range DB.NewScope(&Post{}).GetStructFields() {
+               for _, foreignKey := range []string{"CategoryId", "MainCategoryId"} {
+                       if structField.Name == foreignKey && !structField.IsForeignKey {
+                               t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
+                       }
+               }
+       }
+
+       for _, structField := range DB.NewScope(&Comment{}).GetStructFields() {
+               for _, foreignKey := range []string{"PostId"} {
+                       if structField.Name == foreignKey && !structField.IsForeignKey {
+                               t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
+                       }
+               }
+       }
+}
+
+func testForeignKey(t *testing.T, source interface{}, sourceFieldName string, target interface{}, targetFieldName string) {
+       if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" {
+               // sqlite does not support ADD CONSTRAINT in ALTER TABLE
+               return
+       }
+       targetScope := DB.NewScope(target)
+       targetTableName := targetScope.TableName()
+       modelScope := DB.NewScope(source)
+       modelField, ok := modelScope.FieldByName(sourceFieldName)
+       if !ok {
+               t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", sourceFieldName))
+       }
+       targetField, ok := targetScope.FieldByName(targetFieldName)
+       if !ok {
+               t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", targetFieldName))
+       }
+       dest := fmt.Sprintf("%v(%v)", targetTableName, targetField.DBName)
+       err := DB.Model(source).AddForeignKey(modelField.DBName, dest, "CASCADE", "CASCADE").Error
+       if err != nil {
+               t.Fatalf(fmt.Sprintf("Failed to create foreign key: %v", err))
+       }
+}
+
+// TestLongForeignKey adds a foreign key whose target table name is long
+// enough to stress the dialect's identifier length limit (the target type
+// name indicates MySQL's limit is the case under test).
+func TestLongForeignKey(t *testing.T) {
+       testForeignKey(t, &NotSoLongTableName{}, "ReallyLongThingID", &ReallyLongTableNameToTestMySQLNameLengthLimit{}, "ID")
+}
+
+// TestLongForeignKeyWithShortDest is the inverse case: a long source table
+// name referencing a short destination table.
+func TestLongForeignKeyWithShortDest(t *testing.T) {
+       testForeignKey(t, &ReallyLongThingThatReferencesShort{}, "ShortID", &Short{}, "ID")
+}
+
+func TestHasManyChildrenWithOneStruct(t *testing.T) {
+       category := Category{
+               Name: "main",
+               Categories: []Category{
+                       {Name: "sub1"},
+                       {Name: "sub2"},
+               },
+       }
+
+       DB.Save(&category)
+}
+
+// TestAutoSaveBelongsToAssociation verifies belongs-to autosave control:
+// with association_autoupdate/association_autocreate disabled by struct
+// tag, saving a User neither creates nor updates its Company — though an
+// already-set foreign key is still written — and the per-session settings
+// gorm:association_autocreate / gorm:association_autoupdate re-enable
+// create and update respectively.
+func TestAutoSaveBelongsToAssociation(t *testing.T) {
+       type Company struct {
+               gorm.Model
+               Name string
+       }
+
+       type User struct {
+               gorm.Model
+               Name      string
+               CompanyID uint
+               Company   Company `gorm:"association_autoupdate:false;association_autocreate:false;"`
+       }
+
+       DB.Where("name = ?", "auto_save_association").Delete(&Company{})
+       DB.AutoMigrate(&Company{}, &User{})
+
+       DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_association"}})
+
+       if !DB.Where("name = ?", "auto_save_association").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company auto_save_association should not have been saved when autosave is false")
+       }
+
+       // if foreign key is set, this should be saved even if association isn't
+       company := Company{Name: "auto_save_association"}
+       DB.Save(&company)
+
+       company.Name = "auto_save_association_new_name"
+       user := User{Name: "jinzhu", Company: company}
+
+       DB.Save(&user)
+
+       // The renamed Company must not be written back (autoupdate is off)...
+       if !DB.Where("name = ?", "auto_save_association_new_name").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should not have been updated")
+       }
+
+       // ...but the user row must still reference the existing company.
+       if DB.Where("id = ? AND company_id = ?", user.ID, company.ID).First(&User{}).RecordNotFound() {
+               t.Errorf("User's foreign key should have been saved")
+       }
+
+       // Session-level settings override the struct tag.
+       user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_association_2"}}
+       DB.Set("gorm:association_autocreate", true).Save(&user2)
+       if DB.Where("name = ?", "auto_save_association_2").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company auto_save_association_2 should been created when autocreate is true")
+       }
+
+       user2.Company.Name = "auto_save_association_2_newname"
+       DB.Set("gorm:association_autoupdate", true).Save(&user2)
+
+       if DB.Where("name = ?", "auto_save_association_2_newname").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should been updated")
+       }
+}
+
+// TestAutoSaveHasOneAssociation verifies has-one autosave control: with
+// association_autoupdate/association_autocreate disabled by struct tag,
+// saving a User neither creates nor updates its Company, yet the child's
+// UserID back-reference is still assigned; per-session settings
+// gorm:association_autoupdate / gorm:association_autocreate re-enable the
+// corresponding behavior.
+func TestAutoSaveHasOneAssociation(t *testing.T) {
+       type Company struct {
+               gorm.Model
+               UserID uint
+               Name   string
+       }
+
+       type User struct {
+               gorm.Model
+               Name    string
+               Company Company `gorm:"association_autoupdate:false;association_autocreate:false;"`
+       }
+
+       DB.Where("name = ?", "auto_save_has_one_association").Delete(&Company{})
+       DB.AutoMigrate(&Company{}, &User{})
+
+       DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_has_one_association"}})
+
+       if !DB.Where("name = ?", "auto_save_has_one_association").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company auto_save_has_one_association should not have been saved when autosave is false")
+       }
+
+       company := Company{Name: "auto_save_has_one_association"}
+       DB.Save(&company)
+
+       company.Name = "auto_save_has_one_association_new_name"
+       user := User{Name: "jinzhu", Company: company}
+
+       DB.Save(&user)
+
+       // Neither the rename nor a user_id re-link may be persisted while
+       // autoupdate is off...
+       if !DB.Where("name = ?", "auto_save_has_one_association_new_name").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should not have been updated")
+       }
+
+       if !DB.Where("name = ? AND user_id = ?", "auto_save_has_one_association", user.ID).First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should not have been updated")
+       }
+
+       // ...but the in-memory back-reference must still be assigned.
+       if user.Company.UserID == 0 {
+               t.Errorf("UserID should be assigned")
+       }
+
+       // Session-level settings override the struct tag.
+       company.Name = "auto_save_has_one_association_2_new_name"
+       DB.Set("gorm:association_autoupdate", true).Save(&user)
+
+       if DB.Where("name = ? AND user_id = ?", "auto_save_has_one_association_new_name", user.ID).First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should been updated")
+       }
+
+       user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_has_one_association_2"}}
+       DB.Set("gorm:association_autocreate", true).Save(&user2)
+       if DB.Where("name = ?", "auto_save_has_one_association_2").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company auto_save_has_one_association_2 should been created when autocreate is true")
+       }
+}
+
+// TestAutoSaveMany2ManyAssociation verifies many2many autosave control:
+// with association_autoupdate/association_autocreate disabled by struct
+// tag, saving a User neither creates nor updates associated Companies, but
+// join rows for already-persisted companies are still written; enabling
+// both settings per-session then creates and updates the companies.
+func TestAutoSaveMany2ManyAssociation(t *testing.T) {
+       type Company struct {
+               gorm.Model
+               Name string
+       }
+
+       type User struct {
+               gorm.Model
+               Name      string
+               Companies []Company `gorm:"many2many:user_companies;association_autoupdate:false;association_autocreate:false;"`
+       }
+
+       DB.AutoMigrate(&Company{}, &User{})
+
+       DB.Save(&User{Name: "jinzhu", Companies: []Company{{Name: "auto_save_m2m_association"}}})
+
+       if !DB.Where("name = ?", "auto_save_m2m_association").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company auto_save_m2m_association should not have been saved when autosave is false")
+       }
+
+       company := Company{Name: "auto_save_m2m_association"}
+       DB.Save(&company)
+
+       company.Name = "auto_save_m2m_association_new_name"
+       user := User{Name: "jinzhu", Companies: []Company{company, {Name: "auto_save_m2m_association_new_name_2"}}}
+
+       DB.Save(&user)
+
+       // Neither the rename nor the brand-new company may be persisted...
+       if !DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should not have been updated")
+       }
+
+       if !DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should not been created")
+       }
+
+       // ...but the join row for the pre-existing company is still written.
+       if DB.Model(&user).Association("Companies").Count() != 1 {
+               t.Errorf("Relationship should been saved")
+       }
+
+       // Session-level settings override the struct tag.
+       DB.Set("gorm:association_autoupdate", true).Set("gorm:association_autocreate", true).Save(&user)
+
+       if DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should been updated")
+       }
+
+       if DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() {
+               t.Errorf("Company should been created")
+       }
+
+       if DB.Model(&user).Association("Companies").Count() != 2 {
+               t.Errorf("Relationship should been updated")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback.go b/vendor/github.com/jinzhu/gorm/callback.go
new file mode 100755 (executable)
index 0000000..a438214
--- /dev/null
@@ -0,0 +1,242 @@
+package gorm
+
+import "log"
+
// DefaultCallback default callbacks defined by gorm
var DefaultCallback = &Callback{}

// Callback is a struct that contains all CRUD callbacks
//   Field `creates` contains callbacks that will be called when creating an object
//   Field `updates` contains callbacks that will be called when updating an object
//   Field `deletes` contains callbacks that will be called when deleting an object
//   Field `queries` contains callbacks that will be called when querying an object with query methods like Find, First, Related, Association...
//   Field `rowQueries` contains callbacks that will be called when querying an object with Row, Rows...
//   Field `processors` contains all callback processors; reorder() regenerates the five ordered lists above from it
type Callback struct {
	creates    []*func(scope *Scope)
	updates    []*func(scope *Scope)
	deletes    []*func(scope *Scope)
	queries    []*func(scope *Scope)
	rowQueries []*func(scope *Scope)
	processors []*CallbackProcessor
}
+
// CallbackProcessor contains callback information: one registered (or
// removed/replaced) callback together with its ordering constraints.
type CallbackProcessor struct {
	name      string              // current callback's name
	before    string              // register current callback before the callback with this name
	after     string              // register current callback after the callback with this name
	replace   bool                // replace callbacks with the same name
	remove    bool                // delete callbacks with the same name
	kind      string              // callback type: create, update, delete, query, row_query
	processor *func(scope *Scope) // callback handler (nil for remove-only entries)
	parent    *Callback
}
+
+func (c *Callback) clone() *Callback {
+       return &Callback{
+               creates:    c.creates,
+               updates:    c.updates,
+               deletes:    c.deletes,
+               queries:    c.queries,
+               rowQueries: c.rowQueries,
+               processors: c.processors,
+       }
+}
+
+// Create could be used to register callbacks for creating object
+//     db.Callback().Create().After("gorm:create").Register("plugin:run_after_create", func(*Scope) {
+//       // business logic
+//       ...
+//
+//       // set error if some thing wrong happened, will rollback the creating
+//       scope.Err(errors.New("error"))
+//     })
+func (c *Callback) Create() *CallbackProcessor {
+       return &CallbackProcessor{kind: "create", parent: c}
+}
+
+// Update could be used to register callbacks for updating object, refer `Create` for usage
+func (c *Callback) Update() *CallbackProcessor {
+       return &CallbackProcessor{kind: "update", parent: c}
+}
+
+// Delete could be used to register callbacks for deleting object, refer `Create` for usage
+func (c *Callback) Delete() *CallbackProcessor {
+       return &CallbackProcessor{kind: "delete", parent: c}
+}
+
+// Query could be used to register callbacks for querying objects with query methods like `Find`, `First`, `Related`, `Association`...
+// Refer `Create` for usage
+func (c *Callback) Query() *CallbackProcessor {
+       return &CallbackProcessor{kind: "query", parent: c}
+}
+
+// RowQuery could be used to register callbacks for querying objects with `Row`, `Rows`, refer `Create` for usage
+func (c *Callback) RowQuery() *CallbackProcessor {
+       return &CallbackProcessor{kind: "row_query", parent: c}
+}
+
+// After insert a new callback after callback `callbackName`, refer `Callbacks.Create`
+func (cp *CallbackProcessor) After(callbackName string) *CallbackProcessor {
+       cp.after = callbackName
+       return cp
+}
+
+// Before insert a new callback before callback `callbackName`, refer `Callbacks.Create`
+func (cp *CallbackProcessor) Before(callbackName string) *CallbackProcessor {
+       cp.before = callbackName
+       return cp
+}
+
+// Register a new callback, refer `Callbacks.Create`
+func (cp *CallbackProcessor) Register(callbackName string, callback func(scope *Scope)) {
+       if cp.kind == "row_query" {
+               if cp.before == "" && cp.after == "" && callbackName != "gorm:row_query" {
+                       log.Printf("Registing RowQuery callback %v without specify order with Before(), After(), applying Before('gorm:row_query') by default for compatibility...\n", callbackName)
+                       cp.before = "gorm:row_query"
+               }
+       }
+
+       cp.name = callbackName
+       cp.processor = &callback
+       cp.parent.processors = append(cp.parent.processors, cp)
+       cp.parent.reorder()
+}
+
+// Remove a registered callback
+//     db.Callback().Create().Remove("gorm:update_time_stamp_when_create")
+func (cp *CallbackProcessor) Remove(callbackName string) {
+       log.Printf("[info] removing callback `%v` from %v\n", callbackName, fileWithLineNum())
+       cp.name = callbackName
+       cp.remove = true
+       cp.parent.processors = append(cp.parent.processors, cp)
+       cp.parent.reorder()
+}
+
+// Replace a registered callback with new callback
+//     db.Callback().Create().Replace("gorm:update_time_stamp_when_create", func(*Scope) {
+//                scope.SetColumn("Created", now)
+//                scope.SetColumn("Updated", now)
+//     })
+func (cp *CallbackProcessor) Replace(callbackName string, callback func(scope *Scope)) {
+       log.Printf("[info] replacing callback `%v` from %v\n", callbackName, fileWithLineNum())
+       cp.name = callbackName
+       cp.processor = &callback
+       cp.replace = true
+       cp.parent.processors = append(cp.parent.processors, cp)
+       cp.parent.reorder()
+}
+
+// Get registered callback
+//    db.Callback().Create().Get("gorm:create")
+func (cp *CallbackProcessor) Get(callbackName string) (callback func(scope *Scope)) {
+       for _, p := range cp.parent.processors {
+               if p.name == callbackName && p.kind == cp.kind && !cp.remove {
+                       return *p.processor
+               }
+       }
+       return nil
+}
+
// getRIndex returns the rightmost index of str in strs, or -1 when str is
// not present.
func getRIndex(strs []string, str string) int {
	last := -1
	for i, s := range strs {
		if s == str {
			last = i
		}
	}
	return last
}
+
// sortProcessors resolves the execution order of the given callback
// processors from their before/after constraints (honoring remove/replace
// flags) and returns the ordered handler funcs. Entries flagged `remove`
// participate in ordering but are excluded from the result.
func sortProcessors(cps []*CallbackProcessor) []*func(scope *Scope) {
	var (
		allNames, sortedNames []string
		sortCallbackProcessor func(c *CallbackProcessor)
	)

	for _, cp := range cps {
		// show warning message when the callback name already exists
		// (duplicate registration without Replace/Remove is almost always a mistake)
		if index := getRIndex(allNames, cp.name); index > -1 && !cp.replace && !cp.remove {
			log.Printf("[warning] duplicated callback `%v` from %v\n", cp.name, fileWithLineNum())
		}
		allNames = append(allNames, cp.name)
	}

	// sortCallbackProcessor appends c.name to sortedNames so that every
	// before/after constraint is satisfied, recursing into the referenced
	// callbacks as needed. sortedNames is mutated through the closure.
	sortCallbackProcessor = func(c *CallbackProcessor) {
		if getRIndex(sortedNames, c.name) == -1 { // if not sorted
			if c.before != "" { // if defined before callback
				if index := getRIndex(sortedNames, c.before); index != -1 {
					// if before callback already sorted, insert current callback just before it
					sortedNames = append(sortedNames[:index], append([]string{c.name}, sortedNames[index:]...)...)
				} else if index := getRIndex(allNames, c.before); index != -1 {
					// if before callback exists but hasn't been sorted, append current callback to last
					sortedNames = append(sortedNames, c.name)
					sortCallbackProcessor(cps[index])
				}
			}

			if c.after != "" { // if defined after callback
				if index := getRIndex(sortedNames, c.after); index != -1 {
					// if after callback already sorted, insert current callback just after it
					sortedNames = append(sortedNames[:index+1], append([]string{c.name}, sortedNames[index+1:]...)...)
				} else if index := getRIndex(allNames, c.after); index != -1 {
					// if after callback exists but hasn't been sorted
					cp := cps[index]
					// set after callback's before callback to current callback, so
					// sorting it pulls the current callback into position
					if cp.before == "" {
						cp.before = c.name
					}
					sortCallbackProcessor(cp)
				}
			}

			// if current callback still hasn't been sorted (no constraint placed
			// it), append it to last
			if getRIndex(sortedNames, c.name) == -1 {
				sortedNames = append(sortedNames, c.name)
			}
		}
	}

	for _, cp := range cps {
		sortCallbackProcessor(cp)
	}

	// map sorted names back to handler funcs, dropping removed entries;
	// getRIndex over allNames finds the latest registration for each name
	var sortedFuncs []*func(scope *Scope)
	for _, name := range sortedNames {
		if index := getRIndex(allNames, name); !cps[index].remove {
			sortedFuncs = append(sortedFuncs, cps[index].processor)
		}
	}

	return sortedFuncs
}
+
+// reorder all registered processors, and reset CRUD callbacks
+func (c *Callback) reorder() {
+       var creates, updates, deletes, queries, rowQueries []*CallbackProcessor
+
+       for _, processor := range c.processors {
+               if processor.name != "" {
+                       switch processor.kind {
+                       case "create":
+                               creates = append(creates, processor)
+                       case "update":
+                               updates = append(updates, processor)
+                       case "delete":
+                               deletes = append(deletes, processor)
+                       case "query":
+                               queries = append(queries, processor)
+                       case "row_query":
+                               rowQueries = append(rowQueries, processor)
+                       }
+               }
+       }
+
+       c.creates = sortProcessors(creates)
+       c.updates = sortProcessors(updates)
+       c.deletes = sortProcessors(deletes)
+       c.queries = sortProcessors(queries)
+       c.rowQueries = sortProcessors(rowQueries)
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_create.go b/vendor/github.com/jinzhu/gorm/callback_create.go
new file mode 100755 (executable)
index 0000000..2ab05d3
--- /dev/null
@@ -0,0 +1,164 @@
+package gorm
+
+import (
+       "fmt"
+       "strings"
+)
+
// Define callbacks for creating. Registration order matters: it defines the
// default pipeline (transaction begin -> hooks -> associations -> INSERT ->
// reload of DB-defaulted columns -> hooks -> commit/rollback).
func init() {
	DefaultCallback.Create().Register("gorm:begin_transaction", beginTransactionCallback)
	DefaultCallback.Create().Register("gorm:before_create", beforeCreateCallback)
	DefaultCallback.Create().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
	DefaultCallback.Create().Register("gorm:update_time_stamp", updateTimeStampForCreateCallback)
	DefaultCallback.Create().Register("gorm:create", createCallback)
	DefaultCallback.Create().Register("gorm:force_reload_after_create", forceReloadAfterCreateCallback)
	DefaultCallback.Create().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
	DefaultCallback.Create().Register("gorm:after_create", afterCreateCallback)
	DefaultCallback.Create().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
}
+
+// beforeCreateCallback will invoke `BeforeSave`, `BeforeCreate` method before creating
+func beforeCreateCallback(scope *Scope) {
+       if !scope.HasError() {
+               scope.CallMethod("BeforeSave")
+       }
+       if !scope.HasError() {
+               scope.CallMethod("BeforeCreate")
+       }
+}
+
+// updateTimeStampForCreateCallback will set `CreatedAt`, `UpdatedAt` when creating
+func updateTimeStampForCreateCallback(scope *Scope) {
+       if !scope.HasError() {
+               now := NowFunc()
+
+               if createdAtField, ok := scope.FieldByName("CreatedAt"); ok {
+                       if createdAtField.IsBlank {
+                               createdAtField.Set(now)
+                       }
+               }
+
+               if updatedAtField, ok := scope.FieldByName("UpdatedAt"); ok {
+                       if updatedAtField.IsBlank {
+                               updatedAtField.Set(now)
+                       }
+               }
+       }
+}
+
// createCallback builds and executes the INSERT statement for the current
// scope, then writes the generated primary key back into the model.
func createCallback(scope *Scope) {
	if !scope.HasError() {
		defer scope.trace(NowFunc())

		var (
			columns, placeholders        []string
			blankColumnsWithDefaultValue []string
		)

		// collect insertable columns and their placeholders
		for _, field := range scope.Fields() {
			if scope.changeableField(field) {
				if field.IsNormal && !field.IsIgnored {
					if field.IsBlank && field.HasDefaultValue {
						// let the database fill the default; remember the column so
						// forceReloadAfterCreateCallback can read the value back
						blankColumnsWithDefaultValue = append(blankColumnsWithDefaultValue, scope.Quote(field.DBName))
						scope.InstanceSet("gorm:blank_columns_with_default_value", blankColumnsWithDefaultValue)
					} else if !field.IsPrimaryKey || !field.IsBlank {
						columns = append(columns, scope.Quote(field.DBName))
						placeholders = append(placeholders, scope.AddToVars(field.Field.Interface()))
					}
				} else if field.Relationship != nil && field.Relationship.Kind == "belongs_to" {
					// also persist foreign keys of belongs_to associations whose FK
					// field is itself not changeable
					for _, foreignKey := range field.Relationship.ForeignDBNames {
						if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) {
							columns = append(columns, scope.Quote(foreignField.DBName))
							placeholders = append(placeholders, scope.AddToVars(foreignField.Field.Interface()))
						}
					}
				}
			}
		}

		var (
			returningColumn = "*"
			quotedTableName = scope.QuotedTableName()
			primaryField    = scope.PrimaryField()
			extraOption     string
		)

		if str, ok := scope.Get("gorm:insert_option"); ok {
			extraOption = fmt.Sprint(str)
		}

		if primaryField != nil {
			returningColumn = scope.Quote(primaryField.DBName)
		}

		// dialects such as postgres return the generated id via a RETURNING
		// clause; empty for dialects that use LastInsertId
		lastInsertIDReturningSuffix := scope.Dialect().LastInsertIDReturningSuffix(quotedTableName, returningColumn)

		if len(columns) == 0 {
			// no insertable columns: insert a row of pure defaults
			scope.Raw(fmt.Sprintf(
				"INSERT INTO %v %v%v%v",
				quotedTableName,
				scope.Dialect().DefaultValueStr(),
				addExtraSpaceIfExist(extraOption),
				addExtraSpaceIfExist(lastInsertIDReturningSuffix),
			))
		} else {
			scope.Raw(fmt.Sprintf(
				"INSERT INTO %v (%v) VALUES (%v)%v%v",
				scope.QuotedTableName(),
				strings.Join(columns, ","),
				strings.Join(placeholders, ","),
				addExtraSpaceIfExist(extraOption),
				addExtraSpaceIfExist(lastInsertIDReturningSuffix),
			))
		}

		// execute create sql
		if lastInsertIDReturningSuffix == "" || primaryField == nil {
			// LastInsertId path
			if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
				// set rows affected count
				scope.db.RowsAffected, _ = result.RowsAffected()

				// set primary value to primary field
				if primaryField != nil && primaryField.IsBlank {
					if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil {
						scope.Err(primaryField.Set(primaryValue))
					}
				}
			}
		} else {
			// RETURNING path: scan the generated key straight into the primary
			// key field, which must be addressable
			if primaryField.Field.CanAddr() {
				if err := scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...).Scan(primaryField.Field.Addr().Interface()); scope.Err(err) == nil {
					primaryField.IsBlank = false
					scope.db.RowsAffected = 1
				}
			} else {
				scope.Err(ErrUnaddressable)
			}
		}
	}
}
+
+// forceReloadAfterCreateCallback will reload columns that having default value, and set it back to current object
+func forceReloadAfterCreateCallback(scope *Scope) {
+       if blankColumnsWithDefaultValue, ok := scope.InstanceGet("gorm:blank_columns_with_default_value"); ok {
+               db := scope.DB().New().Table(scope.TableName()).Select(blankColumnsWithDefaultValue.([]string))
+               for _, field := range scope.Fields() {
+                       if field.IsPrimaryKey && !field.IsBlank {
+                               db = db.Where(fmt.Sprintf("%v = ?", field.DBName), field.Field.Interface())
+                       }
+               }
+               db.Scan(scope.Value)
+       }
+}
+
+// afterCreateCallback will invoke `AfterCreate`, `AfterSave` method after creating
+func afterCreateCallback(scope *Scope) {
+       if !scope.HasError() {
+               scope.CallMethod("AfterCreate")
+       }
+       if !scope.HasError() {
+               scope.CallMethod("AfterSave")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_delete.go b/vendor/github.com/jinzhu/gorm/callback_delete.go
new file mode 100755 (executable)
index 0000000..73d9088
--- /dev/null
@@ -0,0 +1,63 @@
+package gorm
+
+import (
+       "errors"
+       "fmt"
+)
+
// Define callbacks for deleting. Registration order defines the default
// pipeline: transaction begin -> BeforeDelete hook -> DELETE (or soft
// delete) -> AfterDelete hook -> commit/rollback.
func init() {
	DefaultCallback.Delete().Register("gorm:begin_transaction", beginTransactionCallback)
	DefaultCallback.Delete().Register("gorm:before_delete", beforeDeleteCallback)
	DefaultCallback.Delete().Register("gorm:delete", deleteCallback)
	DefaultCallback.Delete().Register("gorm:after_delete", afterDeleteCallback)
	DefaultCallback.Delete().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
}
+
+// beforeDeleteCallback will invoke `BeforeDelete` method before deleting
+func beforeDeleteCallback(scope *Scope) {
+       if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() {
+               scope.Err(errors.New("Missing WHERE clause while deleting"))
+               return
+       }
+       if !scope.HasError() {
+               scope.CallMethod("BeforeDelete")
+       }
+}
+
+// deleteCallback used to delete data from database or set deleted_at to current time (when using with soft delete)
+func deleteCallback(scope *Scope) {
+       if !scope.HasError() {
+               var extraOption string
+               if str, ok := scope.Get("gorm:delete_option"); ok {
+                       extraOption = fmt.Sprint(str)
+               }
+
+               deletedAtField, hasDeletedAtField := scope.FieldByName("DeletedAt")
+
+               if !scope.Search.Unscoped && hasDeletedAtField {
+                       scope.Raw(fmt.Sprintf(
+                               "UPDATE %v SET %v=%v%v%v",
+                               scope.QuotedTableName(),
+                               scope.Quote(deletedAtField.DBName),
+                               scope.AddToVars(NowFunc()),
+                               addExtraSpaceIfExist(scope.CombinedConditionSql()),
+                               addExtraSpaceIfExist(extraOption),
+                       )).Exec()
+               } else {
+                       scope.Raw(fmt.Sprintf(
+                               "DELETE FROM %v%v%v",
+                               scope.QuotedTableName(),
+                               addExtraSpaceIfExist(scope.CombinedConditionSql()),
+                               addExtraSpaceIfExist(extraOption),
+                       )).Exec()
+               }
+       }
+}
+
+// afterDeleteCallback will invoke `AfterDelete` method after deleting
+func afterDeleteCallback(scope *Scope) {
+       if !scope.HasError() {
+               scope.CallMethod("AfterDelete")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_query.go b/vendor/github.com/jinzhu/gorm/callback_query.go
new file mode 100755 (executable)
index 0000000..593e5d3
--- /dev/null
@@ -0,0 +1,104 @@
+package gorm
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+)
+
// Define callbacks for querying. Registration order defines the default
// pipeline: SELECT and scan -> preload associations -> AfterFind hook.
func init() {
	DefaultCallback.Query().Register("gorm:query", queryCallback)
	DefaultCallback.Query().Register("gorm:preload", preloadCallback)
	DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback)
}
+
// queryCallback runs the SELECT for the current scope and scans the rows
// into the destination, which must be a struct (exactly one row expected)
// or a slice (of structs or struct pointers).
func queryCallback(scope *Scope) {
	if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
		return
	}

	// we are only preloading relations; don't touch the base model
	if _, skip := scope.InstanceGet("gorm:only_preload"); skip {
		return
	}

	defer scope.trace(NowFunc())

	var (
		isSlice, isPtr bool
		resultType     reflect.Type
		results        = scope.IndirectValue()
	)

	// optional "ORDER BY <pk> ASC|DESC" (used by First/Last)
	if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok {
		if primaryField := scope.PrimaryField(); primaryField != nil {
			scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy))
		}
	}

	// scan into an alternative destination when one was supplied
	if value, ok := scope.Get("gorm:query_destination"); ok {
		results = indirect(reflect.ValueOf(value))
	}

	if kind := results.Kind(); kind == reflect.Slice {
		isSlice = true
		resultType = results.Type().Elem()
		// reset the destination slice before appending scanned rows
		results.Set(reflect.MakeSlice(results.Type(), 0, 0))

		if resultType.Kind() == reflect.Ptr {
			isPtr = true
			resultType = resultType.Elem()
		}
	} else if kind != reflect.Struct {
		scope.Err(errors.New("unsupported destination, should be slice or struct"))
		return
	}

	scope.prepareQuerySQL()

	if !scope.HasError() {
		scope.db.RowsAffected = 0
		if str, ok := scope.Get("gorm:query_option"); ok {
			scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str))
		}

		if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
			defer rows.Close()

			columns, _ := rows.Columns()
			for rows.Next() {
				scope.db.RowsAffected++

				// scan into the struct itself, or into a fresh element for slices
				elem := results
				if isSlice {
					elem = reflect.New(resultType).Elem()
				}

				scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields())

				if isSlice {
					if isPtr {
						results.Set(reflect.Append(results, elem.Addr()))
					} else {
						results.Set(reflect.Append(results, elem))
					}
				}
			}

			if err := rows.Err(); err != nil {
				scope.Err(err)
			} else if scope.db.RowsAffected == 0 && !isSlice {
				// a single-record query with no rows is ErrRecordNotFound;
				// an empty slice result is not an error
				scope.Err(ErrRecordNotFound)
			}
		}
	}
}
+
+// afterQueryCallback will invoke `AfterFind` method after querying
+func afterQueryCallback(scope *Scope) {
+       if !scope.HasError() {
+               scope.CallMethod("AfterFind")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_query_preload.go b/vendor/github.com/jinzhu/gorm/callback_query_preload.go
new file mode 100755 (executable)
index 0000000..d7c8a13
--- /dev/null
@@ -0,0 +1,404 @@
+package gorm
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+       "strconv"
+       "strings"
+)
+
+// preloadCallback used to preload associations
+func preloadCallback(scope *Scope) {
+       // Nested query scopes (see handleManyToManyPreload) set this instance
+       // flag so their inner queries do not recursively preload.
+       if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
+               return
+       }
+
+       if ap, ok := scope.Get("gorm:auto_preload"); ok {
+               // If gorm:auto_preload IS NOT a bool then auto preload.
+               // Else if it IS a bool, use the value
+               if apb, ok := ap.(bool); !ok {
+                       autoPreload(scope)
+               } else if apb {
+                       autoPreload(scope)
+               }
+       }
+
+       if scope.Search.preload == nil || scope.HasError() {
+               return
+       }
+
+       var (
+               // preloadedMap remembers dotted paths (e.g. "Orders.Items")
+               // already handled, so shared prefixes are loaded only once.
+               preloadedMap = map[string]bool{}
+               fields       = scope.Fields()
+       )
+
+       for _, preload := range scope.Search.preload {
+               var (
+                       // "A.B.C" walks one relationship level per element.
+                       preloadFields = strings.Split(preload.schema, ".")
+                       currentScope  = scope
+                       currentFields = fields
+               )
+
+               for idx, preloadField := range preloadFields {
+                       var currentPreloadConditions []interface{}
+
+                       if currentScope == nil {
+                               continue
+                       }
+
+                       // if not preloaded
+                       if preloadKey := strings.Join(preloadFields[:idx+1], "."); !preloadedMap[preloadKey] {
+
+                               // assign search conditions to last preload
+                               if idx == len(preloadFields)-1 {
+                                       currentPreloadConditions = preload.conditions
+                               }
+
+                               for _, field := range currentFields {
+                                       if field.Name != preloadField || field.Relationship == nil {
+                                               continue
+                                       }
+
+                                       // Dispatch on relationship kind; each handler runs
+                                       // its own query and assigns results back in place.
+                                       switch field.Relationship.Kind {
+                                       case "has_one":
+                                               currentScope.handleHasOnePreload(field, currentPreloadConditions)
+                                       case "has_many":
+                                               currentScope.handleHasManyPreload(field, currentPreloadConditions)
+                                       case "belongs_to":
+                                               currentScope.handleBelongsToPreload(field, currentPreloadConditions)
+                                       case "many_to_many":
+                                               currentScope.handleManyToManyPreload(field, currentPreloadConditions)
+                                       default:
+                                               scope.Err(errors.New("unsupported relation"))
+                                       }
+
+                                       preloadedMap[preloadKey] = true
+                                       break
+                               }
+
+                               // No matching relationship field found: abort the whole preload.
+                               if !preloadedMap[preloadKey] {
+                                       scope.Err(fmt.Errorf("can't preload field %s for %s", preloadField, currentScope.GetModelStruct().ModelType))
+                                       return
+                               }
+                       }
+
+                       // preload next level
+                       if idx < len(preloadFields)-1 {
+                               currentScope = currentScope.getColumnAsScope(preloadField)
+                               if currentScope != nil {
+                                       currentFields = currentScope.Fields()
+                               }
+                       }
+               }
+       }
+}
+
+// autoPreload registers a Preload for every relationship field of the scope,
+// honoring a field-level `PRELOAD` tag ("true"/"false") as an opt-out.
+func autoPreload(scope *Scope) {
+       for _, field := range scope.Fields() {
+               if field.Relationship == nil {
+                       continue
+               }
+
+               if val, ok := field.TagSettingsGet("PRELOAD"); ok {
+                       // Tag present but not parseable as bool is a user error.
+                       if preload, err := strconv.ParseBool(val); err != nil {
+                               scope.Err(errors.New("invalid preload option"))
+                               return
+                       } else if !preload {
+                               continue
+                       }
+               }
+
+               scope.Search.Preload(field.Name)
+       }
+}
+
+// generatePreloadDBWithConditions splits preload conditions into two groups:
+// func(*DB) *DB scopes are applied to a fresh DB immediately, while all other
+// values are returned as plain inline conditions for the eventual Find.
+func (scope *Scope) generatePreloadDBWithConditions(conditions []interface{}) (*DB, []interface{}) {
+       var (
+               preloadDB         = scope.NewDB()
+               preloadConditions []interface{}
+       )
+
+       for _, condition := range conditions {
+               if scopes, ok := condition.(func(*DB) *DB); ok {
+                       preloadDB = scopes(preloadDB)
+               } else {
+                       preloadConditions = append(preloadConditions, condition)
+               }
+       }
+
+       return preloadDB, preloadConditions
+}
+
+// handleHasOnePreload used to preload has one associations
+func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) {
+       relation := field.Relationship
+
+       // get relations's primary keys
+       primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
+       if len(primaryKeys) == 0 {
+               return
+       }
+
+       // preload conditions
+       preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+       // find relations
+       query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
+       values := toQueryValues(primaryKeys)
+       if relation.PolymorphicType != "" {
+               // Polymorphic associations additionally filter by owner type.
+               query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
+               values = append(values, relation.PolymorphicValue)
+       }
+
+       results := makeSlice(field.Struct.Type)
+       scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
+
+       // assign find results
+       var (
+               resultsValue       = indirect(reflect.ValueOf(results))
+               indirectScopeValue = scope.IndirectValue()
+       )
+
+       if indirectScopeValue.Kind() == reflect.Slice {
+               // Slice of parents: index results by their foreign-key string,
+               // then match each parent by its association key.
+               foreignValuesToResults := make(map[string]reflect.Value)
+               for i := 0; i < resultsValue.Len(); i++ {
+                       result := resultsValue.Index(i)
+                       foreignValues := toString(getValueFromFields(result, relation.ForeignFieldNames))
+                       foreignValuesToResults[foreignValues] = result
+               }
+               for j := 0; j < indirectScopeValue.Len(); j++ {
+                       indirectValue := indirect(indirectScopeValue.Index(j))
+                       valueString := toString(getValueFromFields(indirectValue, relation.AssociationForeignFieldNames))
+                       if result, found := foreignValuesToResults[valueString]; found {
+                               indirectValue.FieldByName(field.Name).Set(result)
+                       }
+               }
+       } else {
+               // Single parent: assign directly (loop handles 0 or 1 rows).
+               for i := 0; i < resultsValue.Len(); i++ {
+                       result := resultsValue.Index(i)
+                       scope.Err(field.Set(result))
+               }
+       }
+}
+
+// handleHasManyPreload used to preload has many associations
+func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) {
+       relation := field.Relationship
+
+       // get relations's primary keys
+       primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
+       if len(primaryKeys) == 0 {
+               return
+       }
+
+       // preload conditions
+       preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+       // find relations
+       query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
+       values := toQueryValues(primaryKeys)
+       if relation.PolymorphicType != "" {
+               // Polymorphic associations additionally filter by owner type.
+               query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
+               values = append(values, relation.PolymorphicValue)
+       }
+
+       results := makeSlice(field.Struct.Type)
+       scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
+
+       // assign find results
+       var (
+               resultsValue       = indirect(reflect.ValueOf(results))
+               indirectScopeValue = scope.IndirectValue()
+       )
+
+       if indirectScopeValue.Kind() == reflect.Slice {
+               // Group results by foreign-key string so each parent gets its
+               // own sub-slice of children.
+               preloadMap := make(map[string][]reflect.Value)
+               for i := 0; i < resultsValue.Len(); i++ {
+                       result := resultsValue.Index(i)
+                       foreignValues := getValueFromFields(result, relation.ForeignFieldNames)
+                       preloadMap[toString(foreignValues)] = append(preloadMap[toString(foreignValues)], result)
+               }
+
+               for j := 0; j < indirectScopeValue.Len(); j++ {
+                       object := indirect(indirectScopeValue.Index(j))
+                       objectRealValue := getValueFromFields(object, relation.AssociationForeignFieldNames)
+                       f := object.FieldByName(field.Name)
+                       if results, ok := preloadMap[toString(objectRealValue)]; ok {
+                               f.Set(reflect.Append(f, results...))
+                       } else {
+                               // No children: set an empty (non-nil) slice.
+                               f.Set(reflect.MakeSlice(f.Type(), 0, 0))
+                       }
+               }
+       } else {
+               scope.Err(field.Set(resultsValue))
+       }
+}
+
+// handleBelongsToPreload used to preload belongs to associations
+func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) {
+       relation := field.Relationship
+
+       // preload conditions
+       preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+       // get relations's primary keys
+       // (for belongs_to, the foreign key lives on the owner side)
+       primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames, scope.Value)
+       if len(primaryKeys) == 0 {
+               return
+       }
+
+       // find relations
+       results := makeSlice(field.Struct.Type)
+       scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error)
+
+       // assign find results
+       var (
+               resultsValue       = indirect(reflect.ValueOf(results))
+               indirectScopeValue = scope.IndirectValue()
+       )
+
+       // Index owners by their foreign-key string; several owners may point
+       // at the same associated record.
+       foreignFieldToObjects := make(map[string][]*reflect.Value)
+       if indirectScopeValue.Kind() == reflect.Slice {
+               for j := 0; j < indirectScopeValue.Len(); j++ {
+                       object := indirect(indirectScopeValue.Index(j))
+                       valueString := toString(getValueFromFields(object, relation.ForeignFieldNames))
+                       foreignFieldToObjects[valueString] = append(foreignFieldToObjects[valueString], &object)
+               }
+       }
+
+       for i := 0; i < resultsValue.Len(); i++ {
+               result := resultsValue.Index(i)
+               if indirectScopeValue.Kind() == reflect.Slice {
+                       valueString := toString(getValueFromFields(result, relation.AssociationForeignFieldNames))
+                       if objects, found := foreignFieldToObjects[valueString]; found {
+                               for _, object := range objects {
+                                       object.FieldByName(field.Name).Set(result)
+                               }
+                       }
+               } else {
+                       scope.Err(field.Set(result))
+               }
+       }
+}
+
+// handleManyToManyPreload used to preload many to many associations
+func (scope *Scope) handleManyToManyPreload(field *Field, conditions []interface{}) {
+       var (
+               relation         = field.Relationship
+               joinTableHandler = relation.JoinTableHandler
+               fieldType        = field.Struct.Type.Elem()
+               foreignKeyValue  interface{}
+               foreignKeyType   = reflect.ValueOf(&foreignKeyValue).Type()
+               // linkHash maps a hashed source-key tuple to the preloaded
+               // elements belonging to that source record.
+               linkHash = map[string][]reflect.Value{}
+               isPtr    bool
+       )
+
+       // Association slice may hold pointers ([]*T) or values ([]T).
+       if fieldType.Kind() == reflect.Ptr {
+               isPtr = true
+               fieldType = fieldType.Elem()
+       }
+
+       var sourceKeys = []string{}
+       for _, key := range joinTableHandler.SourceForeignKeys() {
+               sourceKeys = append(sourceKeys, key.DBName)
+       }
+
+       // preload conditions
+       preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+       // generate query with join table
+       newScope := scope.New(reflect.New(fieldType).Interface())
+       preloadDB = preloadDB.Table(newScope.TableName()).Model(newScope.Value)
+
+       if len(preloadDB.search.selects) == 0 {
+               preloadDB = preloadDB.Select("*")
+       }
+
+       preloadDB = joinTableHandler.JoinWith(joinTableHandler, preloadDB, scope.Value)
+
+       // preload inline conditions
+       if len(preloadConditions) > 0 {
+               preloadDB = preloadDB.Where(preloadConditions[0], preloadConditions[1:]...)
+       }
+
+       rows, err := preloadDB.Rows()
+
+       if scope.Err(err) != nil {
+               return
+       }
+       defer rows.Close()
+
+       columns, _ := rows.Columns()
+       for rows.Next() {
+               var (
+                       elem   = reflect.New(fieldType).Elem()
+                       fields = scope.New(elem.Addr().Interface()).Fields()
+               )
+
+               // register foreign keys in join tables
+               // (extra synthetic fields capture the join-table key columns
+               // returned alongside the association's own columns)
+               var joinTableFields []*Field
+               for _, sourceKey := range sourceKeys {
+                       joinTableFields = append(joinTableFields, &Field{StructField: &StructField{DBName: sourceKey, IsNormal: true}, Field: reflect.New(foreignKeyType).Elem()})
+               }
+
+               scope.scan(rows, columns, append(fields, joinTableFields...))
+
+               // Run the query callbacks (e.g. AfterFind) on the scanned elem,
+               // but skip the query itself via gorm:skip_query_callback.
+               scope.New(elem.Addr().Interface()).
+                       InstanceSet("gorm:skip_query_callback", true).
+                       callCallbacks(scope.db.parent.callbacks.queries)
+
+               var foreignKeys = make([]interface{}, len(sourceKeys))
+               // generate hashed forkey keys in join table
+               for idx, joinTableField := range joinTableFields {
+                       if !joinTableField.Field.IsNil() {
+                               foreignKeys[idx] = joinTableField.Field.Elem().Interface()
+                       }
+               }
+               hashedSourceKeys := toString(foreignKeys)
+
+               if isPtr {
+                       linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem.Addr())
+               } else {
+                       linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem)
+               }
+       }
+
+       if err := rows.Err(); err != nil {
+               scope.Err(err)
+       }
+
+       // assign find results
+       var (
+               indirectScopeValue = scope.IndirectValue()
+               fieldsSourceMap    = map[string][]reflect.Value{}
+               foreignFieldNames  = []string{}
+       )
+
+       // Translate foreign DB column names to struct field names.
+       for _, dbName := range relation.ForeignFieldNames {
+               if field, ok := scope.FieldByName(dbName); ok {
+                       foreignFieldNames = append(foreignFieldNames, field.Name)
+               }
+       }
+
+       // Map each source record's key tuple to its association slice field(s).
+       if indirectScopeValue.Kind() == reflect.Slice {
+               for j := 0; j < indirectScopeValue.Len(); j++ {
+                       object := indirect(indirectScopeValue.Index(j))
+                       key := toString(getValueFromFields(object, foreignFieldNames))
+                       fieldsSourceMap[key] = append(fieldsSourceMap[key], object.FieldByName(field.Name))
+               }
+       } else if indirectScopeValue.IsValid() {
+               key := toString(getValueFromFields(indirectScopeValue, foreignFieldNames))
+               fieldsSourceMap[key] = append(fieldsSourceMap[key], indirectScopeValue.FieldByName(field.Name))
+       }
+       for source, link := range linkHash {
+               for i, field := range fieldsSourceMap[source] {
+                       //If not 0 this means Value is a pointer and we already added preloaded models to it
+                       if fieldsSourceMap[source][i].Len() != 0 {
+                               continue
+                       }
+                       field.Set(reflect.Append(fieldsSourceMap[source][i], link...))
+               }
+
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_row_query.go b/vendor/github.com/jinzhu/gorm/callback_row_query.go
new file mode 100755 (executable)
index 0000000..c2ff4a0
--- /dev/null
@@ -0,0 +1,30 @@
+package gorm
+
+import "database/sql"
+
+// Define callbacks for row query
+func init() {
+       // Registered on the default callback chain used by DB.Row/DB.Rows.
+       DefaultCallback.RowQuery().Register("gorm:row_query", rowQueryCallback)
+}
+
+// RowQueryResult carries the single-row result back to the caller of Row().
+type RowQueryResult struct {
+       Row *sql.Row
+}
+
+// RowsQueryResult carries the multi-row result (and query error) back to the
+// caller of Rows().
+type RowsQueryResult struct {
+       Rows  *sql.Rows
+       Error error
+}
+
+// rowQueryCallback executes the prepared SQL and stores the raw *sql.Row or
+// *sql.Rows into the result holder placed in the scope's instance settings.
+func rowQueryCallback(scope *Scope) {
+       if result, ok := scope.InstanceGet("row_query_result"); ok {
+               scope.prepareQuerySQL()
+
+               // The holder's concrete type decides single- vs multi-row mode.
+               if rowResult, ok := result.(*RowQueryResult); ok {
+                       rowResult.Row = scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...)
+               } else if rowsResult, ok := result.(*RowsQueryResult); ok {
+                       rowsResult.Rows, rowsResult.Error = scope.SQLDB().Query(scope.SQL, scope.SQLVars...)
+               }
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_save.go b/vendor/github.com/jinzhu/gorm/callback_save.go
new file mode 100755 (executable)
index 0000000..3b4e058
--- /dev/null
@@ -0,0 +1,170 @@
+package gorm
+
+import (
+       "reflect"
+       "strings"
+)
+
+// beginTransactionCallback starts a transaction for the current scope.
+func beginTransactionCallback(scope *Scope) {
+       scope.Begin()
+}
+
+// commitOrRollbackTransactionCallback commits the scope's transaction, or
+// rolls it back if an error was recorded by an earlier callback.
+func commitOrRollbackTransactionCallback(scope *Scope) {
+       scope.CommitOrRollback()
+}
+
+// saveAssociationCheck decides, for one relationship field, whether gorm
+// should auto-update / auto-create the associated record and save the
+// reference (foreign key / join row). Scope-level settings take precedence
+// over struct tag settings; all default to true for a changeable, non-blank,
+// non-ignored relationship field. r is nil when the field has no relationship.
+func saveAssociationCheck(scope *Scope, field *Field) (autoUpdate bool, autoCreate bool, saveReference bool, r *Relationship) {
+       // checkTruth treats anything except false / a non-"true" string as true.
+       checkTruth := func(value interface{}) bool {
+               if v, ok := value.(bool); ok && !v {
+                       return false
+               }
+
+               if v, ok := value.(string); ok {
+                       v = strings.ToLower(v)
+                       return v == "true"
+               }
+
+               return true
+       }
+
+       if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored {
+               if r = field.Relationship; r != nil {
+                       autoUpdate, autoCreate, saveReference = true, true, true
+
+                       // gorm:save_associations / SAVE_ASSOCIATIONS sets all three at once.
+                       if value, ok := scope.Get("gorm:save_associations"); ok {
+                               autoUpdate = checkTruth(value)
+                               autoCreate = autoUpdate
+                               saveReference = autoUpdate
+                       } else if value, ok := field.TagSettingsGet("SAVE_ASSOCIATIONS"); ok {
+                               autoUpdate = checkTruth(value)
+                               autoCreate = autoUpdate
+                               saveReference = autoUpdate
+                       }
+
+                       // More specific settings then override the individual flags.
+                       if value, ok := scope.Get("gorm:association_autoupdate"); ok {
+                               autoUpdate = checkTruth(value)
+                       } else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOUPDATE"); ok {
+                               autoUpdate = checkTruth(value)
+                       }
+
+                       if value, ok := scope.Get("gorm:association_autocreate"); ok {
+                               autoCreate = checkTruth(value)
+                       } else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOCREATE"); ok {
+                               autoCreate = checkTruth(value)
+                       }
+
+                       if value, ok := scope.Get("gorm:association_save_reference"); ok {
+                               saveReference = checkTruth(value)
+                       } else if value, ok := field.TagSettingsGet("ASSOCIATION_SAVE_REFERENCE"); ok {
+                               saveReference = checkTruth(value)
+                       }
+               }
+       }
+
+       return
+}
+
+// saveBeforeAssociationsCallback saves belongs_to associations before the
+// owner record, since the owner needs the association's primary key as its
+// foreign key.
+func saveBeforeAssociationsCallback(scope *Scope) {
+       for _, field := range scope.Fields() {
+               autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
+
+               if relationship != nil && relationship.Kind == "belongs_to" {
+                       fieldValue := field.Field.Addr().Interface()
+                       newScope := scope.New(fieldValue)
+
+                       // Create when the association has no primary key yet,
+                       // otherwise update — each gated by its own flag.
+                       if newScope.PrimaryKeyZero() {
+                               if autoCreate {
+                                       scope.Err(scope.NewDB().Save(fieldValue).Error)
+                               }
+                       } else if autoUpdate {
+                               scope.Err(scope.NewDB().Save(fieldValue).Error)
+                       }
+
+                       if saveReference {
+                               if len(relationship.ForeignFieldNames) != 0 {
+                                       // set value's foreign key
+                                       for idx, fieldName := range relationship.ForeignFieldNames {
+                                               associationForeignName := relationship.AssociationForeignDBNames[idx]
+                                               if foreignField, ok := scope.New(fieldValue).FieldByName(associationForeignName); ok {
+                                                       scope.Err(scope.SetColumn(fieldName, foreignField.Field.Interface()))
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+// saveAfterAssociationsCallback saves has_one, has_many and many_to_many
+// associations after the owner record, since those need the owner's primary
+// key as their foreign key (or join-table key).
+func saveAfterAssociationsCallback(scope *Scope) {
+       for _, field := range scope.Fields() {
+               autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
+
+               if relationship != nil && (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") {
+                       value := field.Field
+
+                       switch value.Kind() {
+                       case reflect.Slice:
+                               // has_many / many_to_many: save each element.
+                               for i := 0; i < value.Len(); i++ {
+                                       newDB := scope.NewDB()
+                                       elem := value.Index(i).Addr().Interface()
+                                       newScope := newDB.NewScope(elem)
+
+                                       if saveReference {
+                                               // Direct foreign keys only apply without a join table.
+                                               if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 {
+                                                       for idx, fieldName := range relationship.ForeignFieldNames {
+                                                               associationForeignName := relationship.AssociationForeignDBNames[idx]
+                                                               if f, ok := scope.FieldByName(associationForeignName); ok {
+                                                                       scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
+                                                               }
+                                                       }
+                                               }
+
+                                               if relationship.PolymorphicType != "" {
+                                                       scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
+                                               }
+                                       }
+
+                                       if newScope.PrimaryKeyZero() {
+                                               if autoCreate {
+                                                       scope.Err(newDB.Save(elem).Error)
+                                               }
+                                       } else if autoUpdate {
+                                               scope.Err(newDB.Save(elem).Error)
+                                       }
+
+                                       // Insert the join-table row once the element has a primary key.
+                                       if !scope.New(newScope.Value).PrimaryKeyZero() && saveReference {
+                                               if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil {
+                                                       scope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value))
+                                               }
+                                       }
+                               }
+                       default:
+                               // has_one: single struct value.
+                               elem := value.Addr().Interface()
+                               newScope := scope.New(elem)
+
+                               if saveReference {
+                                       if len(relationship.ForeignFieldNames) != 0 {
+                                               for idx, fieldName := range relationship.ForeignFieldNames {
+                                                       associationForeignName := relationship.AssociationForeignDBNames[idx]
+                                                       if f, ok := scope.FieldByName(associationForeignName); ok {
+                                                               scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
+                                                       }
+                                               }
+                                       }
+
+                                       if relationship.PolymorphicType != "" {
+                                               scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
+                                       }
+                               }
+
+                               if newScope.PrimaryKeyZero() {
+                                       if autoCreate {
+                                               scope.Err(scope.NewDB().Save(elem).Error)
+                                       }
+                               } else if autoUpdate {
+                                       scope.Err(scope.NewDB().Save(elem).Error)
+                               }
+                       }
+               }
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_system_test.go b/vendor/github.com/jinzhu/gorm/callback_system_test.go
new file mode 100755 (executable)
index 0000000..13ca3f4
--- /dev/null
@@ -0,0 +1,112 @@
+package gorm
+
+import (
+       "reflect"
+       "runtime"
+       "strings"
+       "testing"
+)
+
+// equalFuncs reports whether the registered callback functions, identified by
+// their bare function names (package/receiver prefixes stripped), match the
+// expected name list in order.
+func equalFuncs(funcs []*func(s *Scope), fnames []string) bool {
+       var names []string
+       for _, f := range funcs {
+               // runtime.FuncForPC yields "pkg.name"; keep only "name".
+               fnames := strings.Split(runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name(), ".")
+               names = append(names, fnames[len(fnames)-1])
+       }
+       return reflect.DeepEqual(names, fnames)
+}
+
+// No-op callbacks used as ordering markers in the tests below.
+func create(s *Scope)        {}
+func beforeCreate1(s *Scope) {}
+func beforeCreate2(s *Scope) {}
+func afterCreate1(s *Scope)  {}
+func afterCreate2(s *Scope)  {}
+
+// TestRegisterCallback verifies that plain Register preserves insertion order.
+func TestRegisterCallback(t *testing.T) {
+       var callback = &Callback{}
+
+       callback.Create().Register("before_create1", beforeCreate1)
+       callback.Create().Register("before_create2", beforeCreate2)
+       callback.Create().Register("create", create)
+       callback.Create().Register("after_create1", afterCreate1)
+       callback.Create().Register("after_create2", afterCreate2)
+
+       if !equalFuncs(callback.creates, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) {
+               t.Errorf("register callback")
+       }
+}
+
+// TestRegisterCallbackWithOrder verifies Before/After constraints reorder
+// callbacks, including constraints that reference not-yet-registered names.
+func TestRegisterCallbackWithOrder(t *testing.T) {
+       var callback1 = &Callback{}
+       callback1.Create().Register("before_create1", beforeCreate1)
+       callback1.Create().Register("create", create)
+       callback1.Create().Register("after_create1", afterCreate1)
+       callback1.Create().Before("after_create1").Register("after_create2", afterCreate2)
+       if !equalFuncs(callback1.creates, []string{"beforeCreate1", "create", "afterCreate2", "afterCreate1"}) {
+               t.Errorf("register callback with order")
+       }
+
+       var callback2 = &Callback{}
+
+       callback2.Update().Register("create", create)
+       callback2.Update().Before("create").Register("before_create1", beforeCreate1)
+       // "after_create2" is registered later; the constraint must still hold.
+       callback2.Update().After("after_create2").Register("after_create1", afterCreate1)
+       callback2.Update().Before("before_create1").Register("before_create2", beforeCreate2)
+       callback2.Update().Register("after_create2", afterCreate2)
+
+       if !equalFuncs(callback2.updates, []string{"beforeCreate2", "beforeCreate1", "create", "afterCreate2", "afterCreate1"}) {
+               t.Errorf("register callback with order")
+       }
+}
+
+// TestRegisterCallbackWithComplexOrder verifies combined Before+After
+// constraints on a single registration resolve to a consistent order.
+func TestRegisterCallbackWithComplexOrder(t *testing.T) {
+       var callback1 = &Callback{}
+
+       callback1.Query().Before("after_create1").After("before_create1").Register("create", create)
+       callback1.Query().Register("before_create1", beforeCreate1)
+       callback1.Query().Register("after_create1", afterCreate1)
+
+       if !equalFuncs(callback1.queries, []string{"beforeCreate1", "create", "afterCreate1"}) {
+               t.Errorf("register callback with order")
+       }
+
+       var callback2 = &Callback{}
+
+       callback2.Delete().Before("after_create1").After("before_create1").Register("create", create)
+       callback2.Delete().Before("create").Register("before_create1", beforeCreate1)
+       callback2.Delete().After("before_create1").Register("before_create2", beforeCreate2)
+       callback2.Delete().Register("after_create1", afterCreate1)
+       callback2.Delete().After("after_create1").Register("after_create2", afterCreate2)
+
+       if !equalFuncs(callback2.deletes, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) {
+               t.Errorf("register callback with order")
+       }
+}
+
+// replaceCreate is a no-op callback substituted in TestReplaceCallback.
+func replaceCreate(s *Scope) {}
+
+// TestReplaceCallback verifies Replace swaps a callback's function in place,
+// keeping its position in the chain.
+func TestReplaceCallback(t *testing.T) {
+       var callback = &Callback{}
+
+       callback.Create().Before("after_create1").After("before_create1").Register("create", create)
+       callback.Create().Register("before_create1", beforeCreate1)
+       callback.Create().Register("after_create1", afterCreate1)
+       callback.Create().Replace("create", replaceCreate)
+
+       if !equalFuncs(callback.creates, []string{"beforeCreate1", "replaceCreate", "afterCreate1"}) {
+               t.Errorf("replace callback")
+       }
+}
+
+// TestRemoveCallback verifies Remove deletes a callback from the chain while
+// the remaining callbacks keep their relative order.
+func TestRemoveCallback(t *testing.T) {
+       var callback = &Callback{}
+
+       callback.Create().Before("after_create1").After("before_create1").Register("create", create)
+       callback.Create().Register("before_create1", beforeCreate1)
+       callback.Create().Register("after_create1", afterCreate1)
+       callback.Create().Remove("create")
+
+       if !equalFuncs(callback.creates, []string{"beforeCreate1", "afterCreate1"}) {
+               t.Errorf("remove callback")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_update.go b/vendor/github.com/jinzhu/gorm/callback_update.go
new file mode 100755 (executable)
index 0000000..f6ba0ff
--- /dev/null
@@ -0,0 +1,121 @@
+package gorm
+
+import (
+       "errors"
+       "fmt"
+       "sort"
+       "strings"
+)
+
+// Define callbacks for updating
+func init() {
+       DefaultCallback.Update().Register("gorm:assign_updating_attributes", assignUpdatingAttributesCallback)
+       DefaultCallback.Update().Register("gorm:begin_transaction", beginTransactionCallback)
+       DefaultCallback.Update().Register("gorm:before_update", beforeUpdateCallback)
+       DefaultCallback.Update().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
+       DefaultCallback.Update().Register("gorm:update_time_stamp", updateTimeStampForUpdateCallback)
+       DefaultCallback.Update().Register("gorm:update", updateCallback)
+       DefaultCallback.Update().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
+       DefaultCallback.Update().Register("gorm:after_update", afterUpdateCallback)
+       DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
+}
+
+// assignUpdatingAttributesCallback assign updating attributes to model
+func assignUpdatingAttributesCallback(scope *Scope) {
+       if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok {
+               if updateMaps, hasUpdate := scope.updatedAttrsWithValues(attrs); hasUpdate {
+                       scope.InstanceSet("gorm:update_attrs", updateMaps)
+               } else {
+                       scope.SkipLeft()
+               }
+       }
+}
+
+// beforeUpdateCallback will invoke `BeforeSave`, `BeforeUpdate` method before updating
+func beforeUpdateCallback(scope *Scope) {
+       if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() {
+               scope.Err(errors.New("Missing WHERE clause while updating"))
+               return
+       }
+       if _, ok := scope.Get("gorm:update_column"); !ok {
+               if !scope.HasError() {
+                       scope.CallMethod("BeforeSave")
+               }
+               if !scope.HasError() {
+                       scope.CallMethod("BeforeUpdate")
+               }
+       }
+}
+
+// updateTimeStampForUpdateCallback will set `UpdatedAt` when updating
+func updateTimeStampForUpdateCallback(scope *Scope) {
+       if _, ok := scope.Get("gorm:update_column"); !ok {
+               scope.SetColumn("UpdatedAt", NowFunc())
+       }
+}
+
+// updateCallback the callback used to update data to database
+func updateCallback(scope *Scope) {
+       if !scope.HasError() {
+               var sqls []string
+
+               if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok {
+                       // Sort the column names so that the generated SQL is the same every time.
+                       updateMap := updateAttrs.(map[string]interface{})
+                       var columns []string
+                       for c := range updateMap {
+                               columns = append(columns, c)
+                       }
+                       sort.Strings(columns)
+
+                       for _, column := range columns {
+                               value := updateMap[column]
+                               sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(column), scope.AddToVars(value)))
+                       }
+               } else {
+                       for _, field := range scope.Fields() {
+                               if scope.changeableField(field) {
+                                       if !field.IsPrimaryKey && field.IsNormal {
+                                               if !field.IsForeignKey || !field.IsBlank || !field.HasDefaultValue {
+                                                       sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface())))
+                                               }
+                                       } else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" {
+                                               for _, foreignKey := range relationship.ForeignDBNames {
+                                                       if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) {
+                                                               sqls = append(sqls,
+                                                                       fmt.Sprintf("%v = %v", scope.Quote(foreignField.DBName), scope.AddToVars(foreignField.Field.Interface())))
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+               }
+
+               var extraOption string
+               if str, ok := scope.Get("gorm:update_option"); ok {
+                       extraOption = fmt.Sprint(str)
+               }
+
+               if len(sqls) > 0 {
+                       scope.Raw(fmt.Sprintf(
+                               "UPDATE %v SET %v%v%v",
+                               scope.QuotedTableName(),
+                               strings.Join(sqls, ", "),
+                               addExtraSpaceIfExist(scope.CombinedConditionSql()),
+                               addExtraSpaceIfExist(extraOption),
+                       )).Exec()
+               }
+       }
+}
+
+// afterUpdateCallback will invoke `AfterUpdate`, `AfterSave` method after updating
+func afterUpdateCallback(scope *Scope) {
+       if _, ok := scope.Get("gorm:update_column"); !ok {
+               if !scope.HasError() {
+                       scope.CallMethod("AfterUpdate")
+               }
+               if !scope.HasError() {
+                       scope.CallMethod("AfterSave")
+               }
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callbacks_test.go b/vendor/github.com/jinzhu/gorm/callbacks_test.go
new file mode 100755 (executable)
index 0000000..a58913d
--- /dev/null
@@ -0,0 +1,177 @@
+package gorm_test
+
+import (
+       "errors"
+
+       "github.com/jinzhu/gorm"
+
+       "reflect"
+       "testing"
+)
+
+func (s *Product) BeforeCreate() (err error) {
+       if s.Code == "Invalid" {
+               err = errors.New("invalid product")
+       }
+       s.BeforeCreateCallTimes = s.BeforeCreateCallTimes + 1
+       return
+}
+
+func (s *Product) BeforeUpdate() (err error) {
+       if s.Code == "dont_update" {
+               err = errors.New("can't update")
+       }
+       s.BeforeUpdateCallTimes = s.BeforeUpdateCallTimes + 1
+       return
+}
+
+func (s *Product) BeforeSave() (err error) {
+       if s.Code == "dont_save" {
+               err = errors.New("can't save")
+       }
+       s.BeforeSaveCallTimes = s.BeforeSaveCallTimes + 1
+       return
+}
+
+func (s *Product) AfterFind() {
+       s.AfterFindCallTimes = s.AfterFindCallTimes + 1
+}
+
+func (s *Product) AfterCreate(tx *gorm.DB) {
+       tx.Model(s).UpdateColumn(Product{AfterCreateCallTimes: s.AfterCreateCallTimes + 1})
+}
+
+func (s *Product) AfterUpdate() {
+       s.AfterUpdateCallTimes = s.AfterUpdateCallTimes + 1
+}
+
+func (s *Product) AfterSave() (err error) {
+       if s.Code == "after_save_error" {
+               err = errors.New("can't save")
+       }
+       s.AfterSaveCallTimes = s.AfterSaveCallTimes + 1
+       return
+}
+
+func (s *Product) BeforeDelete() (err error) {
+       if s.Code == "dont_delete" {
+               err = errors.New("can't delete")
+       }
+       s.BeforeDeleteCallTimes = s.BeforeDeleteCallTimes + 1
+       return
+}
+
+func (s *Product) AfterDelete() (err error) {
+       if s.Code == "after_delete_error" {
+               err = errors.New("can't delete")
+       }
+       s.AfterDeleteCallTimes = s.AfterDeleteCallTimes + 1
+       return
+}
+
+func (s *Product) GetCallTimes() []int64 {
+       return []int64{s.BeforeCreateCallTimes, s.BeforeSaveCallTimes, s.BeforeUpdateCallTimes, s.AfterCreateCallTimes, s.AfterSaveCallTimes, s.AfterUpdateCallTimes, s.BeforeDeleteCallTimes, s.AfterDeleteCallTimes, s.AfterFindCallTimes}
+}
+
+func TestRunCallbacks(t *testing.T) {
+       p := Product{Code: "unique_code", Price: 100}
+       DB.Save(&p)
+
+       if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 1, 0, 0, 0, 0}) {
+               t.Errorf("Callbacks should be invoked successfully, %v", p.GetCallTimes())
+       }
+
+       DB.Where("Code = ?", "unique_code").First(&p)
+       if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 0, 0, 0, 0, 1}) {
+               t.Errorf("After callbacks values are not saved, %v", p.GetCallTimes())
+       }
+
+       p.Price = 200
+       DB.Save(&p)
+       if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 1, 1, 0, 0, 1}) {
+               t.Errorf("After update callbacks should be invoked successfully, %v", p.GetCallTimes())
+       }
+
+       var products []Product
+       DB.Find(&products, "code = ?", "unique_code")
+       if products[0].AfterFindCallTimes != 2 {
+               t.Errorf("AfterFind callbacks should work with slice")
+       }
+
+       DB.Where("Code = ?", "unique_code").First(&p)
+       if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 0, 0, 2}) {
+               t.Errorf("After update callbacks values are not saved, %v", p.GetCallTimes())
+       }
+
+       DB.Delete(&p)
+       if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 1, 1, 2}) {
+               t.Errorf("After delete callbacks should be invoked successfully, %v", p.GetCallTimes())
+       }
+
+       if DB.Where("Code = ?", "unique_code").First(&p).Error == nil {
+               t.Errorf("Can't find a deleted record")
+       }
+}
+
+func TestCallbacksWithErrors(t *testing.T) {
+       p := Product{Code: "Invalid", Price: 100}
+       if DB.Save(&p).Error == nil {
+               t.Errorf("An error from before create callbacks happened when create with invalid value")
+       }
+
+       if DB.Where("code = ?", "Invalid").First(&Product{}).Error == nil {
+               t.Errorf("Should not save record that have errors")
+       }
+
+       if DB.Save(&Product{Code: "dont_save", Price: 100}).Error == nil {
+               t.Errorf("An error from after create callbacks happened when create with invalid value")
+       }
+
+       p2 := Product{Code: "update_callback", Price: 100}
+       DB.Save(&p2)
+
+       p2.Code = "dont_update"
+       if DB.Save(&p2).Error == nil {
+               t.Errorf("An error from before update callbacks happened when update with invalid value")
+       }
+
+       if DB.Where("code = ?", "update_callback").First(&Product{}).Error != nil {
+               t.Errorf("Record Should not be updated due to errors happened in before update callback")
+       }
+
+       if DB.Where("code = ?", "dont_update").First(&Product{}).Error == nil {
+               t.Errorf("Record Should not be updated due to errors happened in before update callback")
+       }
+
+       p2.Code = "dont_save"
+       if DB.Save(&p2).Error == nil {
+               t.Errorf("An error from before save callbacks happened when update with invalid value")
+       }
+
+       p3 := Product{Code: "dont_delete", Price: 100}
+       DB.Save(&p3)
+       if DB.Delete(&p3).Error == nil {
+               t.Errorf("An error from before delete callbacks happened when delete")
+       }
+
+       if DB.Where("Code = ?", "dont_delete").First(&p3).Error != nil {
+               t.Errorf("An error from before delete callbacks happened")
+       }
+
+       p4 := Product{Code: "after_save_error", Price: 100}
+       DB.Save(&p4)
+       if err := DB.First(&Product{}, "code = ?", "after_save_error").Error; err == nil {
+               t.Errorf("Record should be reverted if get an error in after save callback")
+       }
+
+       p5 := Product{Code: "after_delete_error", Price: 100}
+       DB.Save(&p5)
+       if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil {
+               t.Errorf("Record should be found")
+       }
+
+       DB.Delete(&p5)
+       if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil {
+               t.Errorf("Record shouldn't be deleted because of an error happened in after delete callback")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/create_test.go b/vendor/github.com/jinzhu/gorm/create_test.go
new file mode 100755 (executable)
index 0000000..9256064
--- /dev/null
@@ -0,0 +1,231 @@
+package gorm_test
+
+import (
+       "os"
+       "reflect"
+       "testing"
+       "time"
+
+       "github.com/jinzhu/now"
+)
+
+func TestCreate(t *testing.T) {
+       float := 35.03554004971999
+       now := time.Now()
+       user := User{Name: "CreateUser", Age: 18, Birthday: &now, UserNum: Num(111), PasswordHash: []byte{'f', 'a', 'k', '4'}, Latitude: float}
+
+       if !DB.NewRecord(user) || !DB.NewRecord(&user) {
+               t.Error("User should be new record before create")
+       }
+
+       if count := DB.Save(&user).RowsAffected; count != 1 {
+               t.Error("There should be one record be affected when create record")
+       }
+
+       if DB.NewRecord(user) || DB.NewRecord(&user) {
+               t.Error("User should not new record after save")
+       }
+
+       var newUser User
+       if err := DB.First(&newUser, user.Id).Error; err != nil {
+               t.Errorf("No error should happen, but got %v", err)
+       }
+
+       if !reflect.DeepEqual(newUser.PasswordHash, []byte{'f', 'a', 'k', '4'}) {
+               t.Errorf("User's PasswordHash should be saved ([]byte)")
+       }
+
+       if newUser.Age != 18 {
+               t.Errorf("User's Age should be saved (int)")
+       }
+
+       if newUser.UserNum != Num(111) {
+               t.Errorf("User's UserNum should be saved (custom type), but got %v", newUser.UserNum)
+       }
+
+       if newUser.Latitude != float {
+               t.Errorf("Float64 should not be changed after save")
+       }
+
+       if user.CreatedAt.IsZero() {
+               t.Errorf("Should have created_at after create")
+       }
+
+       if newUser.CreatedAt.IsZero() {
+               t.Errorf("Should have created_at after create")
+       }
+
+       DB.Model(user).Update("name", "create_user_new_name")
+       DB.First(&user, user.Id)
+       if user.CreatedAt.Format(time.RFC3339Nano) != newUser.CreatedAt.Format(time.RFC3339Nano) {
+               t.Errorf("CreatedAt should not be changed after update")
+       }
+}
+
+func TestCreateEmptyStrut(t *testing.T) {
+       type EmptyStruct struct {
+               ID uint
+       }
+       DB.AutoMigrate(&EmptyStruct{})
+
+       if err := DB.Create(&EmptyStruct{}).Error; err != nil {
+               t.Errorf("No error should happen when creating user, but got %v", err)
+       }
+}
+
+func TestCreateWithExistingTimestamp(t *testing.T) {
+       user := User{Name: "CreateUserExistingTimestamp"}
+
+       timeA := now.MustParse("2016-01-01")
+       user.CreatedAt = timeA
+       user.UpdatedAt = timeA
+       DB.Save(&user)
+
+       if user.CreatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) {
+               t.Errorf("CreatedAt should not be changed")
+       }
+
+       if user.UpdatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) {
+               t.Errorf("UpdatedAt should not be changed")
+       }
+
+       var newUser User
+       DB.First(&newUser, user.Id)
+
+       if newUser.CreatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) {
+               t.Errorf("CreatedAt should not be changed")
+       }
+
+       if newUser.UpdatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) {
+               t.Errorf("UpdatedAt should not be changed")
+       }
+}
+
+type AutoIncrementUser struct {
+       User
+       Sequence uint `gorm:"AUTO_INCREMENT"`
+}
+
+func TestCreateWithAutoIncrement(t *testing.T) {
+       if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
+               t.Skip("Skipping this because only postgres properly support auto_increment on a non-primary_key column")
+       }
+
+       DB.AutoMigrate(&AutoIncrementUser{})
+
+       user1 := AutoIncrementUser{}
+       user2 := AutoIncrementUser{}
+
+       DB.Create(&user1)
+       DB.Create(&user2)
+
+       if user2.Sequence-user1.Sequence != 1 {
+               t.Errorf("Auto increment should apply on Sequence")
+       }
+}
+
+func TestCreateWithNoGORMPrimayKey(t *testing.T) {
+       if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" {
+               t.Skip("Skipping this because MSSQL will return identity only if the table has an Id column")
+       }
+
+       jt := JoinTable{From: 1, To: 2}
+       err := DB.Create(&jt).Error
+       if err != nil {
+               t.Errorf("No error should happen when create a record without a GORM primary key. But in the database this primary key exists and is the union of 2 or more fields\n But got: %s", err)
+       }
+}
+
+func TestCreateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) {
+       animal := Animal{Name: "Ferdinand"}
+       if DB.Save(&animal).Error != nil {
+               t.Errorf("No error should happen when create a record without std primary key")
+       }
+
+       if animal.Counter == 0 {
+               t.Errorf("No std primary key should be filled value after create")
+       }
+
+       if animal.Name != "Ferdinand" {
+               t.Errorf("Default value should be overrided")
+       }
+
+       // Test create with default value not overrided
+       an := Animal{From: "nerdz"}
+
+       if DB.Save(&an).Error != nil {
+               t.Errorf("No error should happen when create an record without std primary key")
+       }
+
+       // We must fetch the value again, to have the default fields updated
+       // (We can't do this in the update statements, since sql default can be expressions
+       // And be different from the fields' type (eg. a time.Time fields has a default value of "now()"
+       DB.Model(Animal{}).Where(&Animal{Counter: an.Counter}).First(&an)
+
+       if an.Name != "galeone" {
+               t.Errorf("Default value should fill the field. But got %v", an.Name)
+       }
+}
+
+func TestAnonymousScanner(t *testing.T) {
+       user := User{Name: "anonymous_scanner", Role: Role{Name: "admin"}}
+       DB.Save(&user)
+
+       var user2 User
+       DB.First(&user2, "name = ?", "anonymous_scanner")
+       if user2.Role.Name != "admin" {
+               t.Errorf("Should be able to get anonymous scanner")
+       }
+
+       if !user2.Role.IsAdmin() {
+               t.Errorf("Should be able to get anonymous scanner")
+       }
+}
+
+func TestAnonymousField(t *testing.T) {
+       user := User{Name: "anonymous_field", Company: Company{Name: "company"}}
+       DB.Save(&user)
+
+       var user2 User
+       DB.First(&user2, "name = ?", "anonymous_field")
+       DB.Model(&user2).Related(&user2.Company)
+       if user2.Company.Name != "company" {
+               t.Errorf("Should be able to get anonymous field")
+       }
+}
+
+func TestSelectWithCreate(t *testing.T) {
+       user := getPreparedUser("select_user", "select_with_create")
+       DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user)
+
+       var queryuser User
+       DB.Preload("BillingAddress").Preload("ShippingAddress").
+               Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id)
+
+       if queryuser.Name != user.Name || queryuser.Age == user.Age {
+               t.Errorf("Should only create users with name column")
+       }
+
+       if queryuser.BillingAddressID.Int64 == 0 || queryuser.ShippingAddressId != 0 ||
+               queryuser.CreditCard.ID == 0 || len(queryuser.Emails) == 0 {
+               t.Errorf("Should only create selected relationships")
+       }
+}
+
+func TestOmitWithCreate(t *testing.T) {
+       user := getPreparedUser("omit_user", "omit_with_create")
+       DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user)
+
+       var queryuser User
+       DB.Preload("BillingAddress").Preload("ShippingAddress").
+               Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id)
+
+       if queryuser.Name == user.Name || queryuser.Age != user.Age {
+               t.Errorf("Should only create users with age column")
+       }
+
+       if queryuser.BillingAddressID.Int64 != 0 || queryuser.ShippingAddressId == 0 ||
+               queryuser.CreditCard.ID != 0 || len(queryuser.Emails) != 0 {
+               t.Errorf("Should not create omitted relationships")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/customize_column_test.go b/vendor/github.com/jinzhu/gorm/customize_column_test.go
new file mode 100755 (executable)
index 0000000..5e19d6f
--- /dev/null
@@ -0,0 +1,346 @@
+package gorm_test
+
+import (
+       "testing"
+       "time"
+
+       "github.com/jinzhu/gorm"
+)
+
+type CustomizeColumn struct {
+       ID   int64      `gorm:"column:mapped_id; primary_key:yes"`
+       Name string     `gorm:"column:mapped_name"`
+       Date *time.Time `gorm:"column:mapped_time"`
+}
+
+// Make sure an ignored field does not interfere with another field's custom
+// column name that matches the ignored field.
+type CustomColumnAndIgnoredFieldClash struct {
+       Body    string `sql:"-"`
+       RawBody string `gorm:"column:body"`
+}
+
+func TestCustomizeColumn(t *testing.T) {
+       col := "mapped_name"
+       DB.DropTable(&CustomizeColumn{})
+       DB.AutoMigrate(&CustomizeColumn{})
+
+       scope := DB.NewScope(&CustomizeColumn{})
+       if !scope.Dialect().HasColumn(scope.TableName(), col) {
+               t.Errorf("CustomizeColumn should have column %s", col)
+       }
+
+       col = "mapped_id"
+       if scope.PrimaryKey() != col {
+               t.Errorf("CustomizeColumn should have primary key %s, but got %q", col, scope.PrimaryKey())
+       }
+
+       expected := "foo"
+       now := time.Now()
+       cc := CustomizeColumn{ID: 666, Name: expected, Date: &now}
+
+       if count := DB.Create(&cc).RowsAffected; count != 1 {
+               t.Error("There should be one record be affected when create record")
+       }
+
+       var cc1 CustomizeColumn
+       DB.First(&cc1, 666)
+
+       if cc1.Name != expected {
+               t.Errorf("Failed to query CustomizeColumn")
+       }
+
+       cc.Name = "bar"
+       DB.Save(&cc)
+
+       var cc2 CustomizeColumn
+       DB.First(&cc2, 666)
+       if cc2.Name != "bar" {
+               t.Errorf("Failed to query CustomizeColumn")
+       }
+}
+
+func TestCustomColumnAndIgnoredFieldClash(t *testing.T) {
+       DB.DropTable(&CustomColumnAndIgnoredFieldClash{})
+       if err := DB.AutoMigrate(&CustomColumnAndIgnoredFieldClash{}).Error; err != nil {
+               t.Errorf("Should not raise error: %s", err)
+       }
+}
+
+type CustomizePerson struct {
+       IdPerson string             `gorm:"column:idPerson;primary_key:true"`
+       Accounts []CustomizeAccount `gorm:"many2many:PersonAccount;associationforeignkey:idAccount;foreignkey:idPerson"`
+}
+
+type CustomizeAccount struct {
+       IdAccount string `gorm:"column:idAccount;primary_key:true"`
+       Name      string
+}
+
+func TestManyToManyWithCustomizedColumn(t *testing.T) {
+       DB.DropTable(&CustomizePerson{}, &CustomizeAccount{}, "PersonAccount")
+       DB.AutoMigrate(&CustomizePerson{}, &CustomizeAccount{})
+
+       account := CustomizeAccount{IdAccount: "account", Name: "id1"}
+       person := CustomizePerson{
+               IdPerson: "person",
+               Accounts: []CustomizeAccount{account},
+       }
+
+       if err := DB.Create(&account).Error; err != nil {
+               t.Errorf("no error should happen, but got %v", err)
+       }
+
+       if err := DB.Create(&person).Error; err != nil {
+               t.Errorf("no error should happen, but got %v", err)
+       }
+
+       var person1 CustomizePerson
+       scope := DB.NewScope(nil)
+       if err := DB.Preload("Accounts").First(&person1, scope.Quote("idPerson")+" = ?", person.IdPerson).Error; err != nil {
+               t.Errorf("no error should happen when preloading customized column many2many relations, but got %v", err)
+       }
+
+       if len(person1.Accounts) != 1 || person1.Accounts[0].IdAccount != "account" {
+               t.Errorf("should preload correct accounts")
+       }
+}
+
+type CustomizeUser struct {
+       gorm.Model
+       Email string `sql:"column:email_address"`
+}
+
+type CustomizeInvitation struct {
+       gorm.Model
+       Address string         `sql:"column:invitation"`
+       Person  *CustomizeUser `gorm:"foreignkey:Email;associationforeignkey:invitation"`
+}
+
+func TestOneToOneWithCustomizedColumn(t *testing.T) {
+       DB.DropTable(&CustomizeUser{}, &CustomizeInvitation{})
+       DB.AutoMigrate(&CustomizeUser{}, &CustomizeInvitation{})
+
+       user := CustomizeUser{
+               Email: "hello@example.com",
+       }
+       invitation := CustomizeInvitation{
+               Address: "hello@example.com",
+       }
+
+       DB.Create(&user)
+       DB.Create(&invitation)
+
+       var invitation2 CustomizeInvitation
+       if err := DB.Preload("Person").Find(&invitation2, invitation.ID).Error; err != nil {
+               t.Errorf("no error should happen, but got %v", err)
+       }
+
+       if invitation2.Person.Email != user.Email {
+               t.Errorf("Should preload one to one relation with customize foreign keys")
+       }
+}
+
+type PromotionDiscount struct {
+       gorm.Model
+       Name     string
+       Coupons  []*PromotionCoupon `gorm:"ForeignKey:discount_id"`
+       Rule     *PromotionRule     `gorm:"ForeignKey:discount_id"`
+       Benefits []PromotionBenefit `gorm:"ForeignKey:promotion_id"`
+}
+
+type PromotionBenefit struct {
+       gorm.Model
+       Name        string
+       PromotionID uint
+       Discount    PromotionDiscount `gorm:"ForeignKey:promotion_id"`
+}
+
+type PromotionCoupon struct {
+       gorm.Model
+       Code       string
+       DiscountID uint
+       Discount   PromotionDiscount
+}
+
+type PromotionRule struct {
+       gorm.Model
+       Name       string
+       Begin      *time.Time
+       End        *time.Time
+       DiscountID uint
+       Discount   *PromotionDiscount
+}
+
+func TestOneToManyWithCustomizedColumn(t *testing.T) {
+       DB.DropTable(&PromotionDiscount{}, &PromotionCoupon{})
+       DB.AutoMigrate(&PromotionDiscount{}, &PromotionCoupon{})
+
+       discount := PromotionDiscount{
+               Name: "Happy New Year",
+               Coupons: []*PromotionCoupon{
+                       {Code: "newyear1"},
+                       {Code: "newyear2"},
+               },
+       }
+
+       if err := DB.Create(&discount).Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       var discount1 PromotionDiscount
+       if err := DB.Preload("Coupons").First(&discount1, "id = ?", discount.ID).Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       if len(discount.Coupons) != 2 {
+               t.Errorf("should find two coupons")
+       }
+
+       var coupon PromotionCoupon
+       if err := DB.Preload("Discount").First(&coupon, "code = ?", "newyear1").Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       if coupon.Discount.Name != "Happy New Year" {
+               t.Errorf("should preload discount from coupon")
+       }
+}
+
+func TestHasOneWithPartialCustomizedColumn(t *testing.T) {
+       DB.DropTable(&PromotionDiscount{}, &PromotionRule{})
+       DB.AutoMigrate(&PromotionDiscount{}, &PromotionRule{})
+
+       var begin = time.Now()
+       var end = time.Now().Add(24 * time.Hour)
+       discount := PromotionDiscount{
+               Name: "Happy New Year 2",
+               Rule: &PromotionRule{
+                       Name:  "time_limited",
+                       Begin: &begin,
+                       End:   &end,
+               },
+       }
+
+       if err := DB.Create(&discount).Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       var discount1 PromotionDiscount
+       if err := DB.Preload("Rule").First(&discount1, "id = ?", discount.ID).Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       if discount.Rule.Begin.Format(time.RFC3339Nano) != begin.Format(time.RFC3339Nano) {
+               t.Errorf("Should be able to preload Rule")
+       }
+
+       var rule PromotionRule
+       if err := DB.Preload("Discount").First(&rule, "name = ?", "time_limited").Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       if rule.Discount.Name != "Happy New Year 2" {
+               t.Errorf("should preload discount from rule")
+       }
+}
+
+func TestBelongsToWithPartialCustomizedColumn(t *testing.T) {
+       DB.DropTable(&PromotionDiscount{}, &PromotionBenefit{})
+       DB.AutoMigrate(&PromotionDiscount{}, &PromotionBenefit{})
+
+       discount := PromotionDiscount{
+               Name: "Happy New Year 3",
+               Benefits: []PromotionBenefit{
+                       {Name: "free cod"},
+                       {Name: "free shipping"},
+               },
+       }
+
+       if err := DB.Create(&discount).Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       var discount1 PromotionDiscount
+       if err := DB.Preload("Benefits").First(&discount1, "id = ?", discount.ID).Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       if len(discount.Benefits) != 2 {
+               t.Errorf("should find two benefits")
+       }
+
+       var benefit PromotionBenefit
+       if err := DB.Preload("Discount").First(&benefit, "name = ?", "free cod").Error; err != nil {
+               t.Errorf("no error should happen but got %v", err)
+       }
+
+       if benefit.Discount.Name != "Happy New Year 3" {
+               t.Errorf("should preload discount from coupon")
+       }
+}
+
+type SelfReferencingUser struct {
+       gorm.Model
+       Name    string
+       Friends []*SelfReferencingUser `gorm:"many2many:UserFriends;association_jointable_foreignkey:friend_id"`
+}
+
+func TestSelfReferencingMany2ManyColumn(t *testing.T) {
+       DB.DropTable(&SelfReferencingUser{}, "UserFriends")
+       DB.AutoMigrate(&SelfReferencingUser{})
+
+       friend1 := SelfReferencingUser{Name: "friend1_m2m"}
+       if err := DB.Create(&friend1).Error; err != nil {
+               t.Errorf("no error should happen, but got %v", err)
+       }
+
+       friend2 := SelfReferencingUser{Name: "friend2_m2m"}
+       if err := DB.Create(&friend2).Error; err != nil {
+               t.Errorf("no error should happen, but got %v", err)
+       }
+
+       user := SelfReferencingUser{
+               Name:    "self_m2m",
+               Friends: []*SelfReferencingUser{&friend1, &friend2},
+       }
+
+       if err := DB.Create(&user).Error; err != nil {
+               t.Errorf("no error should happen, but got %v", err)
+       }
+
+       if DB.Model(&user).Association("Friends").Count() != 2 {
+               t.Errorf("Should find created friends correctly")
+       }
+
+       var newUser = SelfReferencingUser{}
+
+       if err := DB.Preload("Friends").First(&newUser, "id = ?", user.ID).Error; err != nil {
+               t.Errorf("no error should happen, but got %v", err)
+       }
+
+       if len(newUser.Friends) != 2 {
+               t.Errorf("Should preload created frineds for self reference m2m")
+       }
+
+       DB.Model(&newUser).Association("Friends").Append(&SelfReferencingUser{Name: "friend3_m2m"})
+       if DB.Model(&user).Association("Friends").Count() != 3 {
+               t.Errorf("Should find created friends correctly")
+       }
+
+       DB.Model(&newUser).Association("Friends").Replace(&SelfReferencingUser{Name: "friend4_m2m"})
+       if DB.Model(&user).Association("Friends").Count() != 1 {
+               t.Errorf("Should find created friends correctly")
+       }
+
+       friend := SelfReferencingUser{}
+       DB.Model(&newUser).Association("Friends").Find(&friend)
+       if friend.Name != "friend4_m2m" {
+               t.Errorf("Should find created friends correctly")
+       }
+
+       DB.Model(&newUser).Association("Friends").Delete(friend)
+       if DB.Model(&user).Association("Friends").Count() != 0 {
+               t.Errorf("All friends should be deleted")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/delete_test.go b/vendor/github.com/jinzhu/gorm/delete_test.go
new file mode 100755 (executable)
index 0000000..043641f
--- /dev/null
@@ -0,0 +1,91 @@
+package gorm_test
+
+import (
+       "testing"
+       "time"
+)
+
+func TestDelete(t *testing.T) {
+       user1, user2 := User{Name: "delete1"}, User{Name: "delete2"}
+       DB.Save(&user1)
+       DB.Save(&user2)
+
+       if err := DB.Delete(&user1).Error; err != nil {
+               t.Errorf("No error should happen when delete a record, err=%s", err)
+       }
+
+       if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() {
+               t.Errorf("User can't be found after delete")
+       }
+
+       if DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() {
+               t.Errorf("Other users that not deleted should be found-able")
+       }
+}
+
+func TestInlineDelete(t *testing.T) {
+       user1, user2 := User{Name: "inline_delete1"}, User{Name: "inline_delete2"}
+       DB.Save(&user1)
+       DB.Save(&user2)
+
+       if DB.Delete(&User{}, user1.Id).Error != nil {
+               t.Errorf("No error should happen when delete a record")
+       } else if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() {
+               t.Errorf("User can't be found after delete")
+       }
+
+       if err := DB.Delete(&User{}, "name = ?", user2.Name).Error; err != nil {
+               t.Errorf("No error should happen when delete a record, err=%s", err)
+       } else if !DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() {
+               t.Errorf("User can't be found after delete")
+       }
+}
+
+func TestSoftDelete(t *testing.T) {
+       type User struct {
+               Id        int64
+               Name      string
+               DeletedAt *time.Time
+       }
+       DB.AutoMigrate(&User{})
+
+       user := User{Name: "soft_delete"}
+       DB.Save(&user)
+       DB.Delete(&user)
+
+       if DB.First(&User{}, "name = ?", user.Name).Error == nil {
+               t.Errorf("Can't find a soft deleted record")
+       }
+
+       if err := DB.Unscoped().First(&User{}, "name = ?", user.Name).Error; err != nil {
+               t.Errorf("Should be able to find soft deleted record with Unscoped, but err=%s", err)
+       }
+
+       DB.Unscoped().Delete(&user)
+       if !DB.Unscoped().First(&User{}, "name = ?", user.Name).RecordNotFound() {
+               t.Errorf("Can't find permanently deleted record")
+       }
+}
+
+func TestSoftDeleteWithCustomizedDeletedAtColumnName(t *testing.T) {
+       creditCard := CreditCard{Number: "411111111234567"}
+       DB.Save(&creditCard)
+       DB.Delete(&creditCard)
+
+       if deletedAtField, ok := DB.NewScope(&CreditCard{}).FieldByName("DeletedAt"); !ok || deletedAtField.DBName != "deleted_time" {
+               t.Errorf("CreditCard's DeletedAt's column name should be `deleted_time`")
+       }
+
+       if DB.First(&CreditCard{}, "number = ?", creditCard.Number).Error == nil {
+               t.Errorf("Can't find a soft deleted record")
+       }
+
+       if err := DB.Unscoped().First(&CreditCard{}, "number = ?", creditCard.Number).Error; err != nil {
+               t.Errorf("Should be able to find soft deleted record with Unscoped, but err=%s", err)
+       }
+
+       DB.Unscoped().Delete(&creditCard)
+       if !DB.Unscoped().First(&CreditCard{}, "number = ?", creditCard.Number).RecordNotFound() {
+               t.Errorf("Can't find permanently deleted record")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect.go b/vendor/github.com/jinzhu/gorm/dialect.go
new file mode 100755 (executable)
index 0000000..27b308a
--- /dev/null
@@ -0,0 +1,138 @@
+package gorm
+
+import (
+       "database/sql"
+       "fmt"
+       "reflect"
+       "strconv"
+       "strings"
+)
+
+// Dialect interface contains behaviors that differ across SQL database
+type Dialect interface {
+       // GetName get dialect's name
+       GetName() string
+
+       // SetDB set db for dialect
+       SetDB(db SQLCommon)
+
+       // BindVar return the placeholder for actual values in SQL statements, in many dbs it is "?", Postgres using $1
+       BindVar(i int) string
+       // Quote quotes field name to avoid SQL parsing exceptions by using a reserved word as a field name
+       Quote(key string) string
+       // DataTypeOf return data's sql type
+       DataTypeOf(field *StructField) string
+
+       // HasIndex check has index or not
+       HasIndex(tableName string, indexName string) bool
+       // HasForeignKey check has foreign key or not
+       HasForeignKey(tableName string, foreignKeyName string) bool
+       // RemoveIndex remove index
+       RemoveIndex(tableName string, indexName string) error
+       // HasTable check has table or not
+       HasTable(tableName string) bool
+       // HasColumn check has column or not
+       HasColumn(tableName string, columnName string) bool
+       // ModifyColumn modify column's type
+       ModifyColumn(tableName string, columnName string, typ string) error
+
+       // LimitAndOffsetSQL return generated SQL with Limit and Offset, as mssql has special case
+       LimitAndOffsetSQL(limit, offset interface{}) string
+       // SelectFromDummyTable return select values, for most dbs, `SELECT values` just works, mysql needs `SELECT value FROM DUAL`
+       SelectFromDummyTable() string
+       // LastInsertIDReturningSuffix most dbs support LastInsertId, but postgres needs to use `RETURNING`
+       LastInsertIDReturningSuffix(tableName, columnName string) string
+       // DefaultValueStr returns the placeholder clause used when inserting a row with only default values
+       DefaultValueStr() string
+
+       // BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
+       BuildKeyName(kind, tableName string, fields ...string) string
+
+       // CurrentDatabase return current database name
+       CurrentDatabase() string
+}
+
+var dialectsMap = map[string]Dialect{}
+
+func newDialect(name string, db SQLCommon) Dialect {
+       if value, ok := dialectsMap[name]; ok {
+               dialect := reflect.New(reflect.TypeOf(value).Elem()).Interface().(Dialect)
+               dialect.SetDB(db)
+               return dialect
+       }
+
+       fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", name)
+       commontDialect := &commonDialect{}
+       commontDialect.SetDB(db)
+       return commontDialect
+}
+
+// RegisterDialect register new dialect
+func RegisterDialect(name string, dialect Dialect) {
+       dialectsMap[name] = dialect
+}
+
+// GetDialect gets the dialect for the specified dialect name
+func GetDialect(name string) (dialect Dialect, ok bool) {
+       dialect, ok = dialectsMap[name]
+       return
+}
+
+// ParseFieldStructForDialect get field's sql data type
+var ParseFieldStructForDialect = func(field *StructField, dialect Dialect) (fieldValue reflect.Value, sqlType string, size int, additionalType string) {
+       // Get redirected field type
+       var (
+               reflectType = field.Struct.Type
+               dataType, _ = field.TagSettingsGet("TYPE")
+       )
+
+       for reflectType.Kind() == reflect.Ptr {
+               reflectType = reflectType.Elem()
+       }
+
+       // Get redirected field value
+       fieldValue = reflect.Indirect(reflect.New(reflectType))
+
+       if gormDataType, ok := fieldValue.Interface().(interface {
+               GormDataType(Dialect) string
+       }); ok {
+               dataType = gormDataType.GormDataType(dialect)
+       }
+
+       // Get scanner's real value
+       if dataType == "" {
+               var getScannerValue func(reflect.Value)
+               getScannerValue = func(value reflect.Value) {
+                       fieldValue = value
+                       if _, isScanner := reflect.New(fieldValue.Type()).Interface().(sql.Scanner); isScanner && fieldValue.Kind() == reflect.Struct {
+                               getScannerValue(fieldValue.Field(0))
+                       }
+               }
+               getScannerValue(fieldValue)
+       }
+
+       // Default Size
+       if num, ok := field.TagSettingsGet("SIZE"); ok {
+               size, _ = strconv.Atoi(num)
+       } else {
+               size = 255
+       }
+
+       // Default type from tag setting
+       notNull, _ := field.TagSettingsGet("NOT NULL")
+       unique, _ := field.TagSettingsGet("UNIQUE")
+       additionalType = notNull + " " + unique
+       if value, ok := field.TagSettingsGet("DEFAULT"); ok {
+               additionalType = additionalType + " DEFAULT " + value
+       }
+
+       return fieldValue, dataType, size, strings.TrimSpace(additionalType)
+}
+
+func currentDatabaseAndTable(dialect Dialect, tableName string) (string, string) {
+       if strings.Contains(tableName, ".") {
+               splitStrings := strings.SplitN(tableName, ".", 2)
+               return splitStrings[0], splitStrings[1]
+       }
+       return dialect.CurrentDatabase(), tableName
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_common.go b/vendor/github.com/jinzhu/gorm/dialect_common.go
new file mode 100755 (executable)
index 0000000..a479be7
--- /dev/null
@@ -0,0 +1,176 @@
+package gorm
+
+import (
+       "fmt"
+       "reflect"
+       "regexp"
+       "strconv"
+       "strings"
+       "time"
+)
+
+// DefaultForeignKeyNamer contains the default foreign key name generator method
+type DefaultForeignKeyNamer struct {
+}
+
+type commonDialect struct {
+       db SQLCommon
+       DefaultForeignKeyNamer
+}
+
+func init() {
+       RegisterDialect("common", &commonDialect{})
+}
+
+func (commonDialect) GetName() string {
+       return "common"
+}
+
+func (s *commonDialect) SetDB(db SQLCommon) {
+       s.db = db
+}
+
+func (commonDialect) BindVar(i int) string {
+       return "$$$" // ?
+}
+
+func (commonDialect) Quote(key string) string {
+       return fmt.Sprintf(`"%s"`, key)
+}
+
+func (s *commonDialect) fieldCanAutoIncrement(field *StructField) bool {
+       if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
+               return strings.ToLower(value) != "false"
+       }
+       return field.IsPrimaryKey
+}
+
+func (s *commonDialect) DataTypeOf(field *StructField) string {
+       var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+       if sqlType == "" {
+               switch dataValue.Kind() {
+               case reflect.Bool:
+                       sqlType = "BOOLEAN"
+               case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
+                       if s.fieldCanAutoIncrement(field) {
+                               sqlType = "INTEGER AUTO_INCREMENT"
+                       } else {
+                               sqlType = "INTEGER"
+                       }
+               case reflect.Int64, reflect.Uint64:
+                       if s.fieldCanAutoIncrement(field) {
+                               sqlType = "BIGINT AUTO_INCREMENT"
+                       } else {
+                               sqlType = "BIGINT"
+                       }
+               case reflect.Float32, reflect.Float64:
+                       sqlType = "FLOAT"
+               case reflect.String:
+                       if size > 0 && size < 65532 {
+                               sqlType = fmt.Sprintf("VARCHAR(%d)", size)
+                       } else {
+                               sqlType = "VARCHAR(65532)"
+                       }
+               case reflect.Struct:
+                       if _, ok := dataValue.Interface().(time.Time); ok {
+                               sqlType = "TIMESTAMP"
+                       }
+               default:
+                       if _, ok := dataValue.Interface().([]byte); ok {
+                               if size > 0 && size < 65532 {
+                                       sqlType = fmt.Sprintf("BINARY(%d)", size)
+                               } else {
+                                       sqlType = "BINARY(65532)"
+                               }
+                       }
+               }
+       }
+
+       if sqlType == "" {
+               panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String()))
+       }
+
+       if strings.TrimSpace(additionalType) == "" {
+               return sqlType
+       }
+       return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s commonDialect) HasIndex(tableName string, indexName string) bool {
+       var count int
+       currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+       s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count)
+       return count > 0
+}
+
+func (s commonDialect) RemoveIndex(tableName string, indexName string) error {
+       _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName))
+       return err
+}
+
+func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool {
+       return false
+}
+
+func (s commonDialect) HasTable(tableName string) bool {
+       var count int
+       currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+       s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count)
+       return count > 0
+}
+
+func (s commonDialect) HasColumn(tableName string, columnName string) bool {
+       var count int
+       currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+       s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count)
+       return count > 0
+}
+
+func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error {
+       _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ))
+       return err
+}
+
+func (s commonDialect) CurrentDatabase() (name string) {
+       s.db.QueryRow("SELECT DATABASE()").Scan(&name)
+       return
+}
+
+func (commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string) {
+       if limit != nil {
+               if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 {
+                       sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
+               }
+       }
+       if offset != nil {
+               if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 {
+                       sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
+               }
+       }
+       return
+}
+
+func (commonDialect) SelectFromDummyTable() string {
+       return ""
+}
+
+func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string {
+       return ""
+}
+
+func (commonDialect) DefaultValueStr() string {
+       return "DEFAULT VALUES"
+}
+
+// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
+func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string {
+       keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_"))
+       keyName = regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(keyName, "_")
+       return keyName
+}
+
+// IsByteArrayOrSlice returns true if the reflected value is an array or slice
+func IsByteArrayOrSlice(value reflect.Value) bool {
+       return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0))
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_mysql.go b/vendor/github.com/jinzhu/gorm/dialect_mysql.go
new file mode 100755 (executable)
index 0000000..5d63e5c
--- /dev/null
@@ -0,0 +1,191 @@
+package gorm
+
+import (
+       "crypto/sha1"
+       "fmt"
+       "reflect"
+       "regexp"
+       "strconv"
+       "strings"
+       "time"
+       "unicode/utf8"
+)
+
+type mysql struct {
+       commonDialect
+}
+
+func init() {
+       RegisterDialect("mysql", &mysql{})
+}
+
+func (mysql) GetName() string {
+       return "mysql"
+}
+
+func (mysql) Quote(key string) string {
+       return fmt.Sprintf("`%s`", key)
+}
+
+// Get Data Type for MySQL Dialect
+func (s *mysql) DataTypeOf(field *StructField) string {
+       var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+       // MySQL allows only one auto increment column per table, and it must
+       // be a KEY column.
+       if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
+               if _, ok = field.TagSettingsGet("INDEX"); !ok && !field.IsPrimaryKey {
+                       field.TagSettingsDelete("AUTO_INCREMENT")
+               }
+       }
+
+       if sqlType == "" {
+               switch dataValue.Kind() {
+               case reflect.Bool:
+                       sqlType = "boolean"
+               case reflect.Int8:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "tinyint AUTO_INCREMENT"
+                       } else {
+                               sqlType = "tinyint"
+                       }
+               case reflect.Int, reflect.Int16, reflect.Int32:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "int AUTO_INCREMENT"
+                       } else {
+                               sqlType = "int"
+                       }
+               case reflect.Uint8:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "tinyint unsigned AUTO_INCREMENT"
+                       } else {
+                               sqlType = "tinyint unsigned"
+                       }
+               case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "int unsigned AUTO_INCREMENT"
+                       } else {
+                               sqlType = "int unsigned"
+                       }
+               case reflect.Int64:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "bigint AUTO_INCREMENT"
+                       } else {
+                               sqlType = "bigint"
+                       }
+               case reflect.Uint64:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "bigint unsigned AUTO_INCREMENT"
+                       } else {
+                               sqlType = "bigint unsigned"
+                       }
+               case reflect.Float32, reflect.Float64:
+                       sqlType = "double"
+               case reflect.String:
+                       if size > 0 && size < 65532 {
+                               sqlType = fmt.Sprintf("varchar(%d)", size)
+                       } else {
+                               sqlType = "longtext"
+                       }
+               case reflect.Struct:
+                       if _, ok := dataValue.Interface().(time.Time); ok {
+                               precision := ""
+                               if p, ok := field.TagSettingsGet("PRECISION"); ok {
+                                       precision = fmt.Sprintf("(%s)", p)
+                               }
+
+                               if _, ok := field.TagSettingsGet("NOT NULL"); ok {
+                                       sqlType = fmt.Sprintf("timestamp%v", precision)
+                               } else {
+                                       sqlType = fmt.Sprintf("timestamp%v NULL", precision)
+                               }
+                       }
+               default:
+                       if IsByteArrayOrSlice(dataValue) {
+                               if size > 0 && size < 65532 {
+                                       sqlType = fmt.Sprintf("varbinary(%d)", size)
+                               } else {
+                                       sqlType = "longblob"
+                               }
+                       }
+               }
+       }
+
+       if sqlType == "" {
+               panic(fmt.Sprintf("invalid sql type %s (%s) for mysql", dataValue.Type().Name(), dataValue.Kind().String()))
+       }
+
+       if strings.TrimSpace(additionalType) == "" {
+               return sqlType
+       }
+       return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s mysql) RemoveIndex(tableName string, indexName string) error {
+       _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName)))
+       return err
+}
+
+func (s mysql) ModifyColumn(tableName string, columnName string, typ string) error {
+       _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v MODIFY COLUMN %v %v", tableName, columnName, typ))
+       return err
+}
+
+func (s mysql) LimitAndOffsetSQL(limit, offset interface{}) (sql string) {
+       if limit != nil {
+               if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 {
+                       sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
+
+                       if offset != nil {
+                               if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 {
+                                       sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
+                               }
+                       }
+               }
+       }
+       return
+}
+
+func (s mysql) HasForeignKey(tableName string, foreignKeyName string) bool {
+       var count int
+       currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+       s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_SCHEMA=? AND TABLE_NAME=? AND CONSTRAINT_NAME=? AND CONSTRAINT_TYPE='FOREIGN KEY'", currentDatabase, tableName, foreignKeyName).Scan(&count)
+       return count > 0
+}
+
+func (s mysql) CurrentDatabase() (name string) {
+       s.db.QueryRow("SELECT DATABASE()").Scan(&name)
+       return
+}
+
+func (mysql) SelectFromDummyTable() string {
+       return "FROM DUAL"
+}
+
+func (s mysql) BuildKeyName(kind, tableName string, fields ...string) string {
+       keyName := s.commonDialect.BuildKeyName(kind, tableName, fields...)
+       if utf8.RuneCountInString(keyName) <= 64 {
+               return keyName
+       }
+       h := sha1.New()
+       h.Write([]byte(keyName))
+       bs := h.Sum(nil)
+
+       // sha1 is 40 characters, keep first 24 characters of destination
+       destRunes := []rune(regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(fields[0], "_"))
+       if len(destRunes) > 24 {
+               destRunes = destRunes[:24]
+       }
+
+       return fmt.Sprintf("%s%x", string(destRunes), bs)
+}
+
+func (mysql) DefaultValueStr() string {
+       return "VALUES()"
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_postgres.go b/vendor/github.com/jinzhu/gorm/dialect_postgres.go
new file mode 100755 (executable)
index 0000000..53d3138
--- /dev/null
@@ -0,0 +1,143 @@
+package gorm
+
+import (
+       "encoding/json"
+       "fmt"
+       "reflect"
+       "strings"
+       "time"
+)
+
+type postgres struct {
+       commonDialect
+}
+
+func init() {
+       RegisterDialect("postgres", &postgres{})
+       RegisterDialect("cloudsqlpostgres", &postgres{})
+}
+
+func (postgres) GetName() string {
+       return "postgres"
+}
+
+func (postgres) BindVar(i int) string {
+       return fmt.Sprintf("$%v", i)
+}
+
+func (s *postgres) DataTypeOf(field *StructField) string {
+       var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+       if sqlType == "" {
+               switch dataValue.Kind() {
+               case reflect.Bool:
+                       sqlType = "boolean"
+               case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uintptr:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "serial"
+                       } else {
+                               sqlType = "integer"
+                       }
+               case reflect.Int64, reflect.Uint32, reflect.Uint64:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "bigserial"
+                       } else {
+                               sqlType = "bigint"
+                       }
+               case reflect.Float32, reflect.Float64:
+                       sqlType = "numeric"
+               case reflect.String:
+                       if _, ok := field.TagSettingsGet("SIZE"); !ok {
+                               size = 0 // if SIZE hasn't been set, use `text` as the default type, as there is no performance difference
+                       }
+
+                       if size > 0 && size < 65532 {
+                               sqlType = fmt.Sprintf("varchar(%d)", size)
+                       } else {
+                               sqlType = "text"
+                       }
+               case reflect.Struct:
+                       if _, ok := dataValue.Interface().(time.Time); ok {
+                               sqlType = "timestamp with time zone"
+                       }
+               case reflect.Map:
+                       if dataValue.Type().Name() == "Hstore" {
+                               sqlType = "hstore"
+                       }
+               default:
+                       if IsByteArrayOrSlice(dataValue) {
+                               sqlType = "bytea"
+
+                               if isUUID(dataValue) {
+                                       sqlType = "uuid"
+                               }
+
+                               if isJSON(dataValue) {
+                                       sqlType = "jsonb"
+                               }
+                       }
+               }
+       }
+
+       if sqlType == "" {
+               panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", dataValue.Type().Name(), dataValue.Kind().String()))
+       }
+
+       if strings.TrimSpace(additionalType) == "" {
+               return sqlType
+       }
+       return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s postgres) HasIndex(tableName string, indexName string) bool {
+       var count int
+       s.db.QueryRow("SELECT count(*) FROM pg_indexes WHERE tablename = $1 AND indexname = $2 AND schemaname = CURRENT_SCHEMA()", tableName, indexName).Scan(&count)
+       return count > 0
+}
+
+func (s postgres) HasForeignKey(tableName string, foreignKeyName string) bool {
+       var count int
+       s.db.QueryRow("SELECT count(con.conname) FROM pg_constraint con WHERE $1::regclass::oid = con.conrelid AND con.conname = $2 AND con.contype='f'", tableName, foreignKeyName).Scan(&count)
+       return count > 0
+}
+
+func (s postgres) HasTable(tableName string) bool {
+       var count int
+       s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = $1 AND table_type = 'BASE TABLE' AND table_schema = CURRENT_SCHEMA()", tableName).Scan(&count)
+       return count > 0
+}
+
+func (s postgres) HasColumn(tableName string, columnName string) bool {
+       var count int
+       s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = $1 AND column_name = $2 AND table_schema = CURRENT_SCHEMA()", tableName, columnName).Scan(&count)
+       return count > 0
+}
+
+func (s postgres) CurrentDatabase() (name string) {
+       s.db.QueryRow("SELECT CURRENT_DATABASE()").Scan(&name)
+       return
+}
+
+func (s postgres) LastInsertIDReturningSuffix(tableName, key string) string {
+       return fmt.Sprintf("RETURNING %v.%v", tableName, key)
+}
+
+func (postgres) SupportLastInsertID() bool {
+       return false
+}
+
+func isUUID(value reflect.Value) bool {
+       if value.Kind() != reflect.Array || value.Type().Len() != 16 {
+               return false
+       }
+       typename := value.Type().Name()
+       lower := strings.ToLower(typename)
+       return "uuid" == lower || "guid" == lower
+}
+
+func isJSON(value reflect.Value) bool {
+       _, ok := value.Interface().(json.RawMessage)
+       return ok
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go
new file mode 100755 (executable)
index 0000000..5f96c36
--- /dev/null
@@ -0,0 +1,107 @@
+package gorm
+
+import (
+       "fmt"
+       "reflect"
+       "strings"
+       "time"
+)
+
+type sqlite3 struct {
+       commonDialect
+}
+
+func init() {
+       RegisterDialect("sqlite3", &sqlite3{})
+}
+
+func (sqlite3) GetName() string {
+       return "sqlite3"
+}
+
+// Get Data Type for Sqlite Dialect
+func (s *sqlite3) DataTypeOf(field *StructField) string {
+       var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+       if sqlType == "" {
+               switch dataValue.Kind() {
+               case reflect.Bool:
+                       sqlType = "bool"
+               case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "integer primary key autoincrement"
+                       } else {
+                               sqlType = "integer"
+                       }
+               case reflect.Int64, reflect.Uint64:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "integer primary key autoincrement"
+                       } else {
+                               sqlType = "bigint"
+                       }
+               case reflect.Float32, reflect.Float64:
+                       sqlType = "real"
+               case reflect.String:
+                       if size > 0 && size < 65532 {
+                               sqlType = fmt.Sprintf("varchar(%d)", size)
+                       } else {
+                               sqlType = "text"
+                       }
+               case reflect.Struct:
+                       if _, ok := dataValue.Interface().(time.Time); ok {
+                               sqlType = "datetime"
+                       }
+               default:
+                       if IsByteArrayOrSlice(dataValue) {
+                               sqlType = "blob"
+                       }
+               }
+       }
+
+       if sqlType == "" {
+               panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite3", dataValue.Type().Name(), dataValue.Kind().String()))
+       }
+
+       if strings.TrimSpace(additionalType) == "" {
+               return sqlType
+       }
+       return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s sqlite3) HasIndex(tableName string, indexName string) bool {
+       var count int
+       s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count)
+       return count > 0
+}
+
+func (s sqlite3) HasTable(tableName string) bool {
+       var count int
+       s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count)
+       return count > 0
+}
+
+func (s sqlite3) HasColumn(tableName string, columnName string) bool {
+       var count int
+       s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');\n", columnName, columnName), tableName).Scan(&count)
+       return count > 0
+}
+
+func (s sqlite3) CurrentDatabase() (name string) {
+       var (
+               ifaces   = make([]interface{}, 3)
+               pointers = make([]*string, 3)
+               i        int
+       )
+       for i = 0; i < 3; i++ {
+               ifaces[i] = &pointers[i]
+       }
+       if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil {
+               return
+       }
+       if pointers[1] != nil {
+               name = *pointers[1]
+       }
+       return
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go b/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go
new file mode 100755 (executable)
index 0000000..6c424bc
--- /dev/null
@@ -0,0 +1,231 @@
+package mssql
+
+import (
+       "database/sql/driver"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "reflect"
+       "strconv"
+       "strings"
+       "time"
+
+       // Importing mssql driver package only in dialect file, otherwise not needed
+       _ "github.com/denisenkom/go-mssqldb"
+       "github.com/jinzhu/gorm"
+)
+
+// setIdentityInsert is a gorm create callback (registered after
+// gorm:begin_transaction): when an auto-increment primary key already
+// carries an explicit, non-blank value it enables SET IDENTITY_INSERT for
+// the table so SQL Server accepts the supplied key, and records that fact
+// on the scope instance for turnOffIdentityInsert to pick up.
+func setIdentityInsert(scope *gorm.Scope) {
+       if scope.Dialect().GetName() == "mssql" {
+               for _, field := range scope.PrimaryFields() {
+                       if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok && !field.IsBlank {
+                               scope.NewDB().Exec(fmt.Sprintf("SET IDENTITY_INSERT %v ON", scope.TableName()))
+                               scope.InstanceSet("mssql:identity_insert_on", true)
+                       }
+               }
+       }
+}
+
+// turnOffIdentityInsert is the matching create callback that switches
+// IDENTITY_INSERT back off before commit/rollback, but only if
+// setIdentityInsert turned it on for this scope.
+func turnOffIdentityInsert(scope *gorm.Scope) {
+       if scope.Dialect().GetName() == "mssql" {
+               if _, ok := scope.InstanceGet("mssql:identity_insert_on"); ok {
+                       scope.NewDB().Exec(fmt.Sprintf("SET IDENTITY_INSERT %v OFF", scope.TableName()))
+               }
+       }
+}
+
+// init wires the two identity-insert callbacks into gorm's default create
+// callback chain and registers this dialect under the name "mssql".
+func init() {
+       gorm.DefaultCallback.Create().After("gorm:begin_transaction").Register("mssql:set_identity_insert", setIdentityInsert)
+       gorm.DefaultCallback.Create().Before("gorm:commit_or_rollback_transaction").Register("mssql:turn_off_identity_insert", turnOffIdentityInsert)
+       gorm.RegisterDialect("mssql", &mssql{})
+}
+
+// mssql implements gorm.Dialect for Microsoft SQL Server.
+type mssql struct {
+       db gorm.SQLCommon
+       gorm.DefaultForeignKeyNamer
+}
+
+// GetName returns the dialect's registered name.
+func (mssql) GetName() string {
+       return "mssql"
+}
+
+// SetDB stores the connection used by the schema-introspection helpers.
+func (s *mssql) SetDB(db gorm.SQLCommon) {
+       s.db = db
+}
+
+// BindVar returns the bind-variable placeholder for argument position i.
+// NOTE(review): "$$$" looks like a gorm-internal marker rewritten before
+// execution rather than a literal mssql placeholder — confirm upstream.
+func (mssql) BindVar(i int) string {
+       return "$$$" // ?
+}
+
+// Quote quotes an identifier using SQL Server's bracket syntax.
+func (mssql) Quote(key string) string {
+       return fmt.Sprintf(`[%s]`, key)
+}
+
+// DataTypeOf maps a struct field to its SQL Server column type. An explicit
+// type from the field's tag (returned by ParseFieldStructForDialect) wins;
+// otherwise the type is derived from the Go kind, with IDENTITY(1,1) added
+// for auto-incrementing integer primary keys. Panics if no mapping exists.
+func (s *mssql) DataTypeOf(field *gorm.StructField) string {
+       var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s)
+
+       if sqlType == "" {
+               switch dataValue.Kind() {
+               case reflect.Bool:
+                       sqlType = "bit"
+               case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
+                       if s.fieldCanAutoIncrement(field) {
+                               // Record the decision in the tag so later callbacks
+                               // (e.g. setIdentityInsert) can see it.
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "int IDENTITY(1,1)"
+                       } else {
+                               sqlType = "int"
+                       }
+               case reflect.Int64, reflect.Uint64:
+                       if s.fieldCanAutoIncrement(field) {
+                               field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+                               sqlType = "bigint IDENTITY(1,1)"
+                       } else {
+                               sqlType = "bigint"
+                       }
+               case reflect.Float32, reflect.Float64:
+                       sqlType = "float"
+               case reflect.String:
+                       // 8000 is the nvarchar(n) upper bound; beyond it use max.
+                       if size > 0 && size < 8000 {
+                               sqlType = fmt.Sprintf("nvarchar(%d)", size)
+                       } else {
+                               sqlType = "nvarchar(max)"
+                       }
+               case reflect.Struct:
+                       if _, ok := dataValue.Interface().(time.Time); ok {
+                               sqlType = "datetimeoffset"
+                       }
+               default:
+                       if gorm.IsByteArrayOrSlice(dataValue) {
+                               if size > 0 && size < 8000 {
+                                       sqlType = fmt.Sprintf("varbinary(%d)", size)
+                               } else {
+                                       sqlType = "varbinary(max)"
+                               }
+                       }
+               }
+       }
+
+       if sqlType == "" {
+               panic(fmt.Sprintf("invalid sql type %s (%s) for mssql", dataValue.Type().Name(), dataValue.Kind().String()))
+       }
+
+       if strings.TrimSpace(additionalType) == "" {
+               return sqlType
+       }
+       return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+// fieldCanAutoIncrement reports whether the field should auto-increment:
+// an explicit AUTO_INCREMENT tag wins (any value other than "FALSE"),
+// otherwise primary keys auto-increment by default.
+func (s mssql) fieldCanAutoIncrement(field *gorm.StructField) bool {
+       if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
+               return value != "FALSE"
+       }
+       return field.IsPrimaryKey
+}
+
+// HasIndex reports whether index indexName exists on tableName (sys.indexes).
+func (s mssql) HasIndex(tableName string, indexName string) bool {
+       var count int
+       s.db.QueryRow("SELECT count(*) FROM sys.indexes WHERE name=? AND object_id=OBJECT_ID(?)", indexName, tableName).Scan(&count)
+       return count > 0
+}
+
+// RemoveIndex drops index indexName from tableName.
+func (s mssql) RemoveIndex(tableName string, indexName string) error {
+       _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName)))
+       return err
+}
+
+// HasForeignKey reports whether tableName has a foreign key named
+// foreignKeyName in the current (or tableName-qualified) database.
+func (s mssql) HasForeignKey(tableName string, foreignKeyName string) bool {
+       var count int
+       currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+       s.db.QueryRow(`SELECT count(*) 
+       FROM sys.foreign_keys as F inner join sys.tables as T on F.parent_object_id=T.object_id 
+               inner join information_schema.tables as I on I.TABLE_NAME = T.name 
+       WHERE F.name = ? 
+               AND T.Name = ? AND I.TABLE_CATALOG = ?;`, foreignKeyName, tableName, currentDatabase).Scan(&count)
+       return count > 0
+}
+
+// HasTable reports whether tableName exists in the current database.
+func (s mssql) HasTable(tableName string) bool {
+       var count int
+       currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+       s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = ? AND table_catalog = ?", tableName, currentDatabase).Scan(&count)
+       return count > 0
+}
+
+// HasColumn reports whether tableName has a column named columnName.
+func (s mssql) HasColumn(tableName string, columnName string) bool {
+       var count int
+       currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+       s.db.QueryRow("SELECT count(*) FROM information_schema.columns WHERE table_catalog = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count)
+       return count > 0
+}
+
+// ModifyColumn changes columnName of tableName to the given SQL type.
+func (s mssql) ModifyColumn(tableName string, columnName string, typ string) error {
+       _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v %v", tableName, columnName, typ))
+       return err
+}
+
+// CurrentDatabase returns the name of the currently selected database.
+func (s mssql) CurrentDatabase() (name string) {
+       s.db.QueryRow("SELECT DB_NAME() AS [Current Database]").Scan(&name)
+       return
+}
+
+// LimitAndOffsetSQL renders pagination using SQL Server's
+// "OFFSET n ROWS FETCH NEXT m ROWS ONLY" syntax. Values that fail to parse
+// as non-negative integers are silently ignored; FETCH NEXT requires an
+// OFFSET clause, so a default "OFFSET 0 ROWS" is inserted when only a
+// limit is given.
+func (mssql) LimitAndOffsetSQL(limit, offset interface{}) (sql string) {
+       if offset != nil {
+               if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 {
+                       sql += fmt.Sprintf(" OFFSET %d ROWS", parsedOffset)
+               }
+       }
+       if limit != nil {
+               if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 {
+                       if sql == "" {
+                               // add default zero offset
+                               sql += " OFFSET 0 ROWS"
+                       }
+                       sql += fmt.Sprintf(" FETCH NEXT %d ROWS ONLY", parsedLimit)
+               }
+       }
+       return
+}
+
+// SelectFromDummyTable returns the FROM clause used when selecting literal
+// values; SQL Server needs none.
+func (mssql) SelectFromDummyTable() string {
+       return ""
+}
+
+// LastInsertIDReturningSuffix returns the suffix used to fetch the last
+// inserted id inline; not used for mssql.
+func (mssql) LastInsertIDReturningSuffix(tableName, columnName string) string {
+       return ""
+}
+
+// DefaultValueStr returns the clause for inserting a row of all defaults.
+func (mssql) DefaultValueStr() string {
+       return "DEFAULT VALUES"
+}
+
+// currentDatabaseAndTable splits a "database.table" qualified name into its
+// parts; an unqualified name is paired with the dialect's current database.
+func currentDatabaseAndTable(dialect gorm.Dialect, tableName string) (string, string) {
+       if strings.Contains(tableName, ".") {
+               splitStrings := strings.SplitN(tableName, ".", 2)
+               return splitStrings[0], splitStrings[1]
+       }
+       return dialect.CurrentDatabase(), tableName
+}
+
+// JSON type to support easy handling of JSON data in character table fields
+// using golang json.RawMessage for deferred decoding/encoding
+type JSON struct {
+       json.RawMessage
+}
+
+// Value implements driver.Valuer: it returns the raw JSON bytes for
+// storage, or NULL when the message is empty.
+func (j JSON) Value() (driver.Value, error) {
+       if len(j.RawMessage) == 0 {
+               return nil, nil
+       }
+       return j.MarshalJSON()
+}
+
+// Scan implements sql.Scanner: it expects the database value as a string
+// (character column) and unmarshals it into the receiver; any other value
+// type is rejected.
+func (j *JSON) Scan(value interface{}) error {
+       str, ok := value.(string)
+       if !ok {
+               return errors.New(fmt.Sprint("Failed to unmarshal JSONB value (strcast):", value))
+       }
+       bytes := []byte(str)
+       return json.Unmarshal(bytes, j)
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go b/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go
new file mode 100755 (executable)
index 0000000..9deba48
--- /dev/null
@@ -0,0 +1,3 @@
+package mysql
+
+import _ "github.com/go-sql-driver/mysql"
diff --git a/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go b/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go
new file mode 100755 (executable)
index 0000000..424e8bd
--- /dev/null
@@ -0,0 +1,80 @@
+package postgres
+
+import (
+       "database/sql"
+       "database/sql/driver"
+
+       "encoding/json"
+       "errors"
+       "fmt"
+       _ "github.com/lib/pq"
+       "github.com/lib/pq/hstore"
+)
+
+// Hstore is a map representation of PostgreSQL's hstore type; a nil value
+// pointer represents SQL NULL for that key.
+type Hstore map[string]*string
+
+// Value implements driver.Valuer: it converts the map into the lib/pq
+// hstore wire representation, or NULL for an empty map.
+func (h Hstore) Value() (driver.Value, error) {
+       hstore := hstore.Hstore{Map: map[string]sql.NullString{}}
+       if len(h) == 0 {
+               return nil, nil
+       }
+
+       for key, value := range h {
+               var s sql.NullString
+               if value != nil {
+                       s.String = *value
+                       s.Valid = true
+               }
+               hstore.Map[key] = s
+       }
+       return hstore.Value()
+}
+
+// Scan implements sql.Scanner: it decodes a database hstore value into the
+// receiver, mapping SQL NULLs to nil pointers. An empty hstore leaves the
+// receiver untouched.
+func (h *Hstore) Scan(value interface{}) error {
+       hstore := hstore.Hstore{}
+
+       if err := hstore.Scan(value); err != nil {
+               return err
+       }
+
+       if len(hstore.Map) == 0 {
+               return nil
+       }
+
+       *h = Hstore{}
+       for k := range hstore.Map {
+               if hstore.Map[k].Valid {
+                       // Copy into a fresh local so each key gets its own pointer.
+                       s := hstore.Map[k].String
+                       (*h)[k] = &s
+               } else {
+                       (*h)[k] = nil
+               }
+       }
+
+       return nil
+}
+
+// Jsonb Postgresql's JSONB data type
+type Jsonb struct {
+       json.RawMessage
+}
+
+// Value implements driver.Valuer: it returns the raw JSON bytes, or NULL
+// when the message is empty.
+func (j Jsonb) Value() (driver.Value, error) {
+       if len(j.RawMessage) == 0 {
+               return nil, nil
+       }
+       return j.MarshalJSON()
+}
+
+// Scan implements sql.Scanner: it expects the database value as []byte
+// (lib/pq returns jsonb columns as bytes) and unmarshals it into the
+// receiver.
+func (j *Jsonb) Scan(value interface{}) error {
+       bytes, ok := value.([]byte)
+       if !ok {
+               return errors.New(fmt.Sprint("Failed to unmarshal JSONB value:", value))
+       }
+
+       return json.Unmarshal(bytes, j)
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go b/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go
new file mode 100755 (executable)
index 0000000..069ad3a
--- /dev/null
@@ -0,0 +1,3 @@
+package sqlite
+
+import _ "github.com/mattn/go-sqlite3"
diff --git a/vendor/github.com/jinzhu/gorm/docker-compose.yml b/vendor/github.com/jinzhu/gorm/docker-compose.yml
new file mode 100755 (executable)
index 0000000..79bf5fc
--- /dev/null
@@ -0,0 +1,30 @@
+# Databases used by the gorm test suite; container ports are remapped to
+# non-standard host ports (991x/992x/993x) to avoid clashing with local
+# installations.
+version: '3'
+
+services:
+  # MySQL on host port 9910.
+  mysql:
+    image: 'mysql:latest'
+    ports:
+      - 9910:3306
+    environment:
+      - MYSQL_DATABASE=gorm
+      - MYSQL_USER=gorm
+      - MYSQL_PASSWORD=gorm
+      - MYSQL_RANDOM_ROOT_PASSWORD="yes"
+  # PostgreSQL on host port 9920.
+  postgres:
+    image: 'postgres:latest'
+    ports:
+      - 9920:5432
+    environment:
+      - POSTGRES_USER=gorm
+      - POSTGRES_DB=gorm
+      - POSTGRES_PASSWORD=gorm
+  # SQL Server on host port 9930.
+  mssql:
+    image: 'mcmoe/mssqldocker:latest'
+    ports:
+      - 9930:1433
+    environment:
+      - ACCEPT_EULA=Y
+      - SA_PASSWORD=LoremIpsum86
+      - MSSQL_DB=gorm
+      - MSSQL_USER=gorm
+      - MSSQL_PASSWORD=LoremIpsum86
diff --git a/vendor/github.com/jinzhu/gorm/embedded_struct_test.go b/vendor/github.com/jinzhu/gorm/embedded_struct_test.go
new file mode 100755 (executable)
index 0000000..5f8ece5
--- /dev/null
@@ -0,0 +1,91 @@
+package gorm_test
+
+import "testing"
+
+// BasePost is a fixture embedded (anonymously or by tag) in the post types
+// below; Id is its primary key.
+type BasePost struct {
+       Id    int64
+       Title string
+       URL   string
+}
+
+// Author is a fixture embedded with a column-name prefix in the post types.
+type Author struct {
+       ID    string
+       Name  string
+       Email string
+}
+
+// HNPost embeds via anonymous fields; Author's columns get a "user_" prefix.
+type HNPost struct {
+       BasePost
+       Author  `gorm:"embedded_prefix:user_"` // Embedded struct
+       Upvotes int32
+}
+
+// EngadgetPost embeds via named fields tagged "embedded"; Author's columns
+// get an "author_" prefix.
+type EngadgetPost struct {
+       BasePost BasePost `gorm:"embedded"`
+       Author   Author   `gorm:"embedded;embedded_prefix:author_"` // Embedded struct
+       ImageUrl string
+}
+
+// TestPrefixColumnNameForEmbeddedStruct verifies that embedded_prefix tags
+// produce prefixed column names and that the embedded primary key does not
+// create extra primary fields on the outer struct.
+func TestPrefixColumnNameForEmbeddedStruct(t *testing.T) {
+       dialect := DB.NewScope(&EngadgetPost{}).Dialect()
+       engadgetPostScope := DB.NewScope(&EngadgetPost{})
+       if !dialect.HasColumn(engadgetPostScope.TableName(), "author_id") || !dialect.HasColumn(engadgetPostScope.TableName(), "author_name") || !dialect.HasColumn(engadgetPostScope.TableName(), "author_email") {
+               t.Errorf("should has prefix for embedded columns")
+       }
+
+       if len(engadgetPostScope.PrimaryFields()) != 1 {
+               t.Errorf("should have only one primary field with embedded struct, but got %v", len(engadgetPostScope.PrimaryFields()))
+       }
+
+       hnScope := DB.NewScope(&HNPost{})
+       if !dialect.HasColumn(hnScope.TableName(), "user_id") || !dialect.HasColumn(hnScope.TableName(), "user_name") || !dialect.HasColumn(hnScope.TableName(), "user_email") {
+               t.Errorf("should has prefix for embedded columns")
+       }
+}
+
+// TestSaveAndQueryEmbeddedStruct verifies that embedded-struct fields round-
+// trip through Save/First and that the embedded struct itself is flattened
+// out of scope.Fields().
+func TestSaveAndQueryEmbeddedStruct(t *testing.T) {
+       DB.Save(&HNPost{BasePost: BasePost{Title: "news"}})
+       DB.Save(&HNPost{BasePost: BasePost{Title: "hn_news"}})
+       var news HNPost
+       if err := DB.First(&news, "title = ?", "hn_news").Error; err != nil {
+               t.Errorf("no error should happen when query with embedded struct, but got %v", err)
+       } else if news.Title != "hn_news" {
+               t.Errorf("embedded struct's value should be scanned correctly")
+       }
+
+       DB.Save(&EngadgetPost{BasePost: BasePost{Title: "engadget_news"}})
+       var egNews EngadgetPost
+       if err := DB.First(&egNews, "title = ?", "engadget_news").Error; err != nil {
+               t.Errorf("no error should happen when query with embedded struct, but got %v", err)
+       } else if egNews.BasePost.Title != "engadget_news" {
+               t.Errorf("embedded struct's value should be scanned correctly")
+       }
+
+       if DB.NewScope(&HNPost{}).PrimaryField() == nil {
+               t.Errorf("primary key with embedded struct should works")
+       }
+
+       for _, field := range DB.NewScope(&HNPost{}).Fields() {
+               if field.Name == "BasePost" {
+                       t.Errorf("scope Fields should not contain embedded struct")
+               }
+       }
+}
+
+// TestEmbeddedPointerTypeStruct verifies that an anonymously embedded
+// *pointer* struct is created and queried like a value embed.
+func TestEmbeddedPointerTypeStruct(t *testing.T) {
+       type HNPost struct {
+               *BasePost
+               Upvotes int32
+       }
+
+       DB.Create(&HNPost{BasePost: &BasePost{Title: "embedded_pointer_type"}})
+
+       var hnPost HNPost
+       if err := DB.First(&hnPost, "title = ?", "embedded_pointer_type").Error; err != nil {
+               t.Errorf("No error should happen when find embedded pointer type, but got %v", err)
+       }
+
+       if hnPost.Title != "embedded_pointer_type" {
+               t.Errorf("Should find correct value for embedded pointer type")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/errors.go b/vendor/github.com/jinzhu/gorm/errors.go
new file mode 100755 (executable)
index 0000000..27c9a92
--- /dev/null
@@ -0,0 +1,72 @@
+package gorm
+
+import (
+       "errors"
+       "strings"
+)
+
+var (
+       // ErrRecordNotFound is returned when no matching record is found while
+       // looking up with a struct; finding into a slice won't return this error.
+       ErrRecordNotFound = errors.New("record not found")
+       // ErrInvalidSQL invalid SQL error, happens when you passed invalid SQL
+       ErrInvalidSQL = errors.New("invalid SQL")
+       // ErrInvalidTransaction invalid transaction when you are trying to `Commit` or `Rollback`
+       ErrInvalidTransaction = errors.New("no valid transaction")
+       // ErrCantStartTransaction can't start transaction when you are trying to start one with `Begin`
+       ErrCantStartTransaction = errors.New("can't start transaction")
+       // ErrUnaddressable unaddressable value
+       ErrUnaddressable = errors.New("using unaddressable value")
+)
+
+// Errors contains all happened errors
+type Errors []error
+
+// IsRecordNotFoundError reports whether err is, or (when err is an Errors
+// list) contains, ErrRecordNotFound.
+func IsRecordNotFoundError(err error) bool {
+       if errs, ok := err.(Errors); ok {
+               for _, err := range errs {
+                       if err == ErrRecordNotFound {
+                               return true
+                       }
+               }
+       }
+       return err == ErrRecordNotFound
+}
+
+// GetErrors gets all happened errors
+func (errs Errors) GetErrors() []error {
+       return errs
+}
+
+// Add appends the given errors, skipping nils, flattening nested Errors
+// lists, and deduplicating by == comparison. The (possibly grown) list is
+// returned; the receiver is not mutated in place.
+func (errs Errors) Add(newErrors ...error) Errors {
+       for _, err := range newErrors {
+               if err == nil {
+                       continue
+               }
+
+               if errors, ok := err.(Errors); ok {
+                       errs = errs.Add(errors...)
+               } else {
+                       // Reuse ok as a "not seen yet" flag for deduplication.
+                       ok = true
+                       for _, e := range errs {
+                               if err == e {
+                                       ok = false
+                               }
+                       }
+                       if ok {
+                               errs = append(errs, err)
+                       }
+               }
+       }
+       return errs
+}
+
+// Error formats all collected errors as a single "; "-joined string,
+// satisfying the error interface.
+func (errs Errors) Error() string {
+       var errors = []string{}
+       for _, e := range errs {
+               errors = append(errors, e.Error())
+       }
+       return strings.Join(errors, "; ")
+}
diff --git a/vendor/github.com/jinzhu/gorm/errors_test.go b/vendor/github.com/jinzhu/gorm/errors_test.go
new file mode 100755 (executable)
index 0000000..9a428de
--- /dev/null
@@ -0,0 +1,20 @@
+package gorm_test
+
+import (
+       "errors"
+       "testing"
+
+       "github.com/jinzhu/gorm"
+)
+
+// TestErrorsCanBeUsedOutsideGorm verifies that Errors.Add deduplicates:
+// re-adding the whole list to itself must not repeat any entries.
+func TestErrorsCanBeUsedOutsideGorm(t *testing.T) {
+       errs := []error{errors.New("First"), errors.New("Second")}
+
+       gErrs := gorm.Errors(errs)
+       gErrs = gErrs.Add(errors.New("Third"))
+       gErrs = gErrs.Add(gErrs)
+
+       if gErrs.Error() != "First; Second; Third" {
+               t.Fatalf("Gave wrong error, got %s", gErrs.Error())
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/field.go b/vendor/github.com/jinzhu/gorm/field.go
new file mode 100755 (executable)
index 0000000..acd06e2
--- /dev/null
@@ -0,0 +1,66 @@
+package gorm
+
+import (
+       "database/sql"
+       "database/sql/driver"
+       "errors"
+       "fmt"
+       "reflect"
+)
+
+// Field model field definition
+type Field struct {
+       *StructField
+       IsBlank bool // true when the field currently holds its zero value
+       Field   reflect.Value
+}
+
+// Set assigns value to the field, trying in order: direct convertibility,
+// pointer dereference (allocating when nil) plus convertibility, and
+// finally the field's sql.Scanner implementation (unwrapping a
+// driver.Valuer argument first). An invalid reflect.Value resets the field
+// to its zero value. IsBlank is recomputed after the assignment.
+func (field *Field) Set(value interface{}) (err error) {
+       if !field.Field.IsValid() {
+               return errors.New("field value not valid")
+       }
+
+       if !field.Field.CanAddr() {
+               return ErrUnaddressable
+       }
+
+       // Accept either a raw value or a pre-built reflect.Value.
+       reflectValue, ok := value.(reflect.Value)
+       if !ok {
+               reflectValue = reflect.ValueOf(value)
+       }
+
+       fieldValue := field.Field
+       if reflectValue.IsValid() {
+               if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
+                       fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
+               } else {
+                       if fieldValue.Kind() == reflect.Ptr {
+                               // Allocate the pointee so we can set through the pointer.
+                               if fieldValue.IsNil() {
+                                       fieldValue.Set(reflect.New(field.Struct.Type.Elem()))
+                               }
+                               fieldValue = fieldValue.Elem()
+                       }
+
+                       if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
+                               fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
+                       } else if scanner, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
+                               // Unwrap driver.Valuer inputs before handing to Scan.
+                               v := reflectValue.Interface()
+                               if valuer, ok := v.(driver.Valuer); ok {
+                                       if v, err = valuer.Value(); err == nil {
+                                               err = scanner.Scan(v)
+                                       }
+                               } else {
+                                       err = scanner.Scan(v)
+                               }
+                       } else {
+                               err = fmt.Errorf("could not convert argument of field %s from %s to %s", field.Name, reflectValue.Type(), fieldValue.Type())
+                       }
+               }
+       } else {
+               field.Field.Set(reflect.Zero(field.Field.Type()))
+       }
+
+       field.IsBlank = isBlank(field.Field)
+       return err
+}
diff --git a/vendor/github.com/jinzhu/gorm/field_test.go b/vendor/github.com/jinzhu/gorm/field_test.go
new file mode 100755 (executable)
index 0000000..03a3b3b
--- /dev/null
@@ -0,0 +1,67 @@
+package gorm_test
+
+import (
+       "testing"
+
+       "github.com/gofrs/uuid"
+       "github.com/jinzhu/gorm"
+)
+
+// CalculateField is a fixture exercising relationship and embedded-field
+// calculation in NewScope.
+type CalculateField struct {
+       gorm.Model
+       Name     string
+       Children []CalculateFieldChild
+       Category CalculateFieldCategory
+       EmbeddedField
+}
+
+// EmbeddedField carries a sql tag so the test can check tag settings on an
+// embedded field.
+type EmbeddedField struct {
+       EmbeddedName string `sql:"NOT NULL;DEFAULT:'hello'"`
+}
+
+// CalculateFieldChild is the has-many side of CalculateField.Children.
+type CalculateFieldChild struct {
+       gorm.Model
+       CalculateFieldID uint
+       Name             string
+}
+
+// CalculateFieldCategory is the has-one side of CalculateField.Category.
+type CalculateFieldCategory struct {
+       gorm.Model
+       CalculateFieldID uint
+       Name             string
+}
+
+// TestCalculateField verifies that relationships and embedded-field tag
+// settings are resolved on the first scope calculation.
+func TestCalculateField(t *testing.T) {
+       var field CalculateField
+       var scope = DB.NewScope(&field)
+       if field, ok := scope.FieldByName("Children"); !ok || field.Relationship == nil {
+               t.Errorf("Should calculate fields correctly for the first time")
+       }
+
+       if field, ok := scope.FieldByName("Category"); !ok || field.Relationship == nil {
+               t.Errorf("Should calculate fields correctly for the first time")
+       }
+
+       if field, ok := scope.FieldByName("embedded_name"); !ok {
+               t.Errorf("should find embedded field")
+       } else if _, ok := field.TagSettingsGet("NOT NULL"); !ok {
+               t.Errorf("should find embedded field's tag settings")
+       }
+}
+
+// TestFieldSet verifies Field.Set on a sql.Scanner/driver.Valuer field
+// (uuid.NullUUID) set from a plain uuid.UUID value.
+func TestFieldSet(t *testing.T) {
+       type TestFieldSetNullUUID struct {
+               NullUUID uuid.NullUUID
+       }
+       scope := DB.NewScope(&TestFieldSetNullUUID{})
+       field := scope.Fields()[0]
+       err := field.Set(uuid.FromStringOrNil("3034d44a-da03-11e8-b366-4a00070b9f00"))
+       if err != nil {
+               t.Fatal(err)
+       }
+       if id, ok := field.Field.Addr().Interface().(*uuid.NullUUID); !ok {
+               t.Fatal()
+       } else if !id.Valid || id.UUID.String() != "3034d44a-da03-11e8-b366-4a00070b9f00" {
+               t.Fatal(id)
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/interface.go b/vendor/github.com/jinzhu/gorm/interface.go
new file mode 100755 (executable)
index 0000000..55128f7
--- /dev/null
@@ -0,0 +1,20 @@
+package gorm
+
+import "database/sql"
+
+// SQLCommon is the minimal database connection functionality gorm requires.  Implemented by *sql.DB.
+type SQLCommon interface {
+       Exec(query string, args ...interface{}) (sql.Result, error)
+       Prepare(query string) (*sql.Stmt, error)
+       Query(query string, args ...interface{}) (*sql.Rows, error)
+       QueryRow(query string, args ...interface{}) *sql.Row
+}
+
+// sqlDb is the subset of *sql.DB needed to open a transaction.
+type sqlDb interface {
+       Begin() (*sql.Tx, error)
+}
+
+// sqlTx is the subset of *sql.Tx needed to finish a transaction.
+type sqlTx interface {
+       Commit() error
+       Rollback() error
+}
diff --git a/vendor/github.com/jinzhu/gorm/join_table_handler.go b/vendor/github.com/jinzhu/gorm/join_table_handler.go
new file mode 100755 (executable)
index 0000000..a036d46
--- /dev/null
@@ -0,0 +1,211 @@
+package gorm
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+       "strings"
+)
+
+// JoinTableHandlerInterface is an interface for how to handle many2many relations
+type JoinTableHandlerInterface interface {
+       // Setup initializes the join table handler from the relationship
+       Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type)
+       // Table return join table's table name
+       Table(db *DB) string
+       // Add create relationship in join table for source and destination
+       Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error
+       // Delete delete relationship in join table for sources
+       Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error
+       // JoinWith query with `Join` conditions
+       JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB
+       // SourceForeignKeys return source foreign keys
+       SourceForeignKeys() []JoinTableForeignKey
+       // DestinationForeignKeys return destination foreign keys
+       DestinationForeignKeys() []JoinTableForeignKey
+}
+
+// JoinTableForeignKey maps a join-table column (DBName) to the associated
+// model's column (AssociationDBName).
+type JoinTableForeignKey struct {
+       DBName            string
+       AssociationDBName string
+}
+
+// JoinTableSource is a struct that contains model type and foreign keys
+type JoinTableSource struct {
+       ModelType   reflect.Type
+       ForeignKeys []JoinTableForeignKey
+}
+
+// JoinTableHandler default join table handler; the sql:"-" tags keep these
+// bookkeeping fields out of any table definition.
+type JoinTableHandler struct {
+       TableName   string          `sql:"-"`
+       Source      JoinTableSource `sql:"-"`
+       Destination JoinTableSource `sql:"-"`
+}
+
+// SourceForeignKeys return source foreign keys
+func (s *JoinTableHandler) SourceForeignKeys() []JoinTableForeignKey {
+       return s.Source.ForeignKeys
+}
+
+// DestinationForeignKeys return destination foreign keys
+func (s *JoinTableHandler) DestinationForeignKeys() []JoinTableForeignKey {
+       return s.Destination.ForeignKeys
+}
+
+// Setup initializes a default join table handler: it records the table name
+// and builds the source/destination foreign key mappings from the
+// relationship's field-name and DB-name lists (matched by index).
+func (s *JoinTableHandler) Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) {
+       s.TableName = tableName
+
+       s.Source = JoinTableSource{ModelType: source}
+       s.Source.ForeignKeys = []JoinTableForeignKey{}
+       for idx, dbName := range relationship.ForeignFieldNames {
+               s.Source.ForeignKeys = append(s.Source.ForeignKeys, JoinTableForeignKey{
+                       DBName:            relationship.ForeignDBNames[idx],
+                       AssociationDBName: dbName,
+               })
+       }
+
+       s.Destination = JoinTableSource{ModelType: destination}
+       s.Destination.ForeignKeys = []JoinTableForeignKey{}
+       for idx, dbName := range relationship.AssociationForeignFieldNames {
+               s.Destination.ForeignKeys = append(s.Destination.ForeignKeys, JoinTableForeignKey{
+                       DBName:            relationship.AssociationForeignDBNames[idx],
+                       AssociationDBName: dbName,
+               })
+       }
+}
+
+// Table return join table's table name, passed through the global
+// DefaultTableNameHandler hook.
+func (s JoinTableHandler) Table(db *DB) string {
+       return DefaultTableNameHandler(db, s.TableName)
+}
+
+// updateConditionMap fills conditionMap with join-table column -> value
+// pairs for each source whose model type matches one of the given
+// joinTableSources; values are read from the matching association fields.
+func (s JoinTableHandler) updateConditionMap(conditionMap map[string]interface{}, db *DB, joinTableSources []JoinTableSource, sources ...interface{}) {
+       for _, source := range sources {
+               scope := db.NewScope(source)
+               modelType := scope.GetModelStruct().ModelType
+
+               for _, joinTableSource := range joinTableSources {
+                       if joinTableSource.ModelType == modelType {
+                               for _, foreignKey := range joinTableSource.ForeignKeys {
+                                       if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
+                                               conditionMap[foreignKey.DBName] = field.Field.Interface()
+                                       }
+                               }
+                               break
+                       }
+               }
+       }
+}
+
+// Add creates the relationship row for source and destination in the join
+// table, using INSERT ... SELECT ... WHERE NOT EXISTS so an existing pair
+// is not duplicated.
+func (s JoinTableHandler) Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error {
+       var (
+               scope        = db.NewScope("")
+               conditionMap = map[string]interface{}{}
+       )
+
+       // Update condition map for source
+       s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source}, source)
+
+       // Update condition map for destination
+       s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Destination}, destination)
+
+       var assignColumns, binVars, conditions []string
+       var values []interface{}
+       for key, value := range conditionMap {
+               assignColumns = append(assignColumns, scope.Quote(key))
+               binVars = append(binVars, `?`)
+               conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
+               values = append(values, value)
+       }
+
+       // Deliberately double the values: range uses the slice length at loop
+       // start, so this appends exactly one copy. The first half binds the
+       // SELECT placeholders, the second half the NOT EXISTS conditions.
+       for _, value := range values {
+               values = append(values, value)
+       }
+
+       quotedTable := scope.Quote(handler.Table(db))
+       sql := fmt.Sprintf(
+               "INSERT INTO %v (%v) SELECT %v %v WHERE NOT EXISTS (SELECT * FROM %v WHERE %v)",
+               quotedTable,
+               strings.Join(assignColumns, ","),
+               strings.Join(binVars, ","),
+               scope.Dialect().SelectFromDummyTable(),
+               quotedTable,
+               strings.Join(conditions, " AND "),
+       )
+
+       return db.Exec(sql, values...).Error
+}
+
+// Delete removes join-table rows matching the foreign-key values of the
+// given sources (all conditions ANDed together).
+func (s JoinTableHandler) Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error {
+       var (
+               scope        = db.NewScope(nil)
+               conditions   []string
+               values       []interface{}
+               conditionMap = map[string]interface{}{}
+       )
+
+       s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source, s.Destination}, sources...)
+
+       for key, value := range conditionMap {
+               conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
+               values = append(values, value)
+       }
+
+       return db.Table(handler.Table(db)).Where(strings.Join(conditions, " AND "), values...).Delete("").Error
+}
+
+// JoinWith adds an INNER JOIN through the join table onto the destination
+// table, restricted (via IN) to the source foreign-key values found on the
+// given source value. If source's type is not this handler's source model
+// type, db.Error is set instead.
+func (s JoinTableHandler) JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB {
+       var (
+               scope           = db.NewScope(source)
+               tableName       = handler.Table(db)
+               quotedTableName = scope.Quote(tableName)
+               joinConditions  []string
+               values          []interface{}
+       )
+
+       if s.Source.ModelType == scope.GetModelStruct().ModelType {
+               destinationTableName := db.NewScope(reflect.New(s.Destination.ModelType).Interface()).QuotedTableName()
+               // ON conditions: join-table destination columns = destination table columns.
+               for _, foreignKey := range s.Destination.ForeignKeys {
+                       joinConditions = append(joinConditions, fmt.Sprintf("%v.%v = %v.%v", quotedTableName, scope.Quote(foreignKey.DBName), destinationTableName, scope.Quote(foreignKey.AssociationDBName)))
+               }
+
+               var foreignDBNames []string
+               var foreignFieldNames []string
+
+               for _, foreignKey := range s.Source.ForeignKeys {
+                       foreignDBNames = append(foreignDBNames, foreignKey.DBName)
+                       if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
+                               foreignFieldNames = append(foreignFieldNames, field.Name)
+                       }
+               }
+
+               foreignFieldValues := scope.getColumnAsArray(foreignFieldNames, scope.Value)
+
+               var condString string
+               if len(foreignFieldValues) > 0 {
+                       var quotedForeignDBNames []string
+                       for _, dbName := range foreignDBNames {
+                               quotedForeignDBNames = append(quotedForeignDBNames, tableName+"."+dbName)
+                       }
+
+                       condString = fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, quotedForeignDBNames), toQueryMarks(foreignFieldValues))
+
+                       keys := scope.getColumnAsArray(foreignFieldNames, scope.Value)
+                       values = append(values, toQueryValues(keys))
+               } else {
+                       // No key values on the source: make the condition always false.
+                       condString = fmt.Sprintf("1 <> 1")
+               }
+
+               return db.Joins(fmt.Sprintf("INNER JOIN %v ON %v", quotedTableName, strings.Join(joinConditions, " AND "))).
+                       Where(condString, toQueryValues(foreignFieldValues)...)
+       }
+
+       db.Error = errors.New("wrong source type for join table handler")
+       return db
+}
diff --git a/vendor/github.com/jinzhu/gorm/join_table_test.go b/vendor/github.com/jinzhu/gorm/join_table_test.go
new file mode 100755 (executable)
index 0000000..6d5f427
--- /dev/null
@@ -0,0 +1,117 @@
+package gorm_test
+
+import (
+       "fmt"
+       "strconv"
+       "testing"
+       "time"
+
+       "github.com/jinzhu/gorm"
+)
+
+// Person is a test model associated with Address through the
+// person_addresses many2many join table.
+type Person struct {
+       Id        int
+       Name      string
+       Addresses []*Address `gorm:"many2many:person_addresses;"`
+}
+
+// PersonAddress is a custom join table handler (embeds gorm.JoinTableHandler)
+// that adds soft-delete support to the person_addresses join rows via
+// the DeletedAt column.
+type PersonAddress struct {
+       gorm.JoinTableHandler
+       PersonID  int
+       AddressID int
+       DeletedAt *time.Time
+       CreatedAt time.Time
+}
+
+// Add links a person to an address. If a (possibly soft-deleted) join row
+// for the pair already exists it is revived by nulling deleted_at;
+// otherwise a fresh join row is created.
+func (*PersonAddress) Add(handler gorm.JoinTableHandlerInterface, db *gorm.DB, foreignValue interface{}, associationValue interface{}) error {
+       foreignPrimaryKey, _ := strconv.Atoi(fmt.Sprint(db.NewScope(foreignValue).PrimaryKeyValue()))
+       associationPrimaryKey, _ := strconv.Atoi(fmt.Sprint(db.NewScope(associationValue).PrimaryKeyValue()))
+       // Unscoped so a soft-deleted row is found and resurrected instead of
+       // violating uniqueness with a duplicate insert.
+       if result := db.Unscoped().Model(&PersonAddress{}).Where(map[string]interface{}{
+               "person_id":  foreignPrimaryKey,
+               "address_id": associationPrimaryKey,
+       }).Update(map[string]interface{}{
+               "person_id":  foreignPrimaryKey,
+               "address_id": associationPrimaryKey,
+               "deleted_at": gorm.Expr("NULL"),
+       }).RowsAffected; result == 0 {
+               return db.Create(&PersonAddress{
+                       PersonID:  foreignPrimaryKey,
+                       AddressID: associationPrimaryKey,
+               }).Error
+       }
+
+       return nil
+}
+
+// Delete removes join rows for the given sources.
+// NOTE(review): no extra conditions are applied here — this relies on the
+// caller's db already being scoped to the relevant rows; confirm against
+// gorm's association-delete call path.
+func (*PersonAddress) Delete(handler gorm.JoinTableHandlerInterface, db *gorm.DB, sources ...interface{}) error {
+       return db.Delete(&PersonAddress{}).Error
+}
+
+// JoinWith joins addresses through person_addresses, filtering out
+// soft-deleted join rows (deleted_at set to a meaningful timestamp).
+func (pa *PersonAddress) JoinWith(handler gorm.JoinTableHandlerInterface, db *gorm.DB, source interface{}) *gorm.DB {
+       table := pa.Table(db)
+       return db.Joins("INNER JOIN person_addresses ON person_addresses.address_id = addresses.id").Where(fmt.Sprintf("%v.deleted_at IS NULL OR %v.deleted_at <= '0001-01-02'", table, table))
+}
+
+// TestJoinTable exercises the custom PersonAddress join table handler:
+// association Delete soft-deletes the join row (hidden from scoped queries
+// and Count, still visible via Unscoped), and Clear removes all links.
+func TestJoinTable(t *testing.T) {
+       DB.Exec("drop table person_addresses;")
+       DB.AutoMigrate(&Person{})
+       DB.SetJoinTableHandler(&Person{}, "Addresses", &PersonAddress{})
+
+       address1 := &Address{Address1: "address 1"}
+       address2 := &Address{Address1: "address 2"}
+       person := &Person{Name: "person", Addresses: []*Address{address1, address2}}
+       DB.Save(person)
+
+       // Soft-deletes the address1 join row via PersonAddress.Delete.
+       DB.Model(person).Association("Addresses").Delete(address1)
+
+       if DB.Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 1 {
+               t.Errorf("Should found one address")
+       }
+
+       if DB.Model(person).Association("Addresses").Count() != 1 {
+               t.Errorf("Should found one address")
+       }
+
+       // Unscoped bypasses the soft-delete filter, so both rows are visible.
+       if DB.Unscoped().Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 2 {
+               t.Errorf("Found two addresses with Unscoped")
+       }
+
+       if DB.Model(person).Association("Addresses").Clear(); DB.Model(person).Association("Addresses").Count() != 0 {
+               t.Errorf("Should deleted all addresses")
+       }
+}
+
+// TestEmbeddedMany2ManyRelationship verifies that a many2many association
+// declared on an embedded struct (EmbeddedPerson inside NewPerson) still
+// supports Save, association Delete, Count, and Clear.
+func TestEmbeddedMany2ManyRelationship(t *testing.T) {
+       type EmbeddedPerson struct {
+               ID        int
+               Name      string
+               Addresses []*Address `gorm:"many2many:person_addresses;"`
+       }
+
+       type NewPerson struct {
+               EmbeddedPerson
+               ExternalID uint
+       }
+       DB.Exec("drop table person_addresses;")
+       DB.AutoMigrate(&NewPerson{})
+
+       address1 := &Address{Address1: "address 1"}
+       address2 := &Address{Address1: "address 2"}
+       person := &NewPerson{ExternalID: 100, EmbeddedPerson: EmbeddedPerson{Name: "person", Addresses: []*Address{address1, address2}}}
+       if err := DB.Save(person).Error; err != nil {
+               t.Errorf("no error should return when save embedded many2many relationship, but got %v", err)
+       }
+
+       if err := DB.Model(person).Association("Addresses").Delete(address1).Error; err != nil {
+               t.Errorf("no error should return when delete embedded many2many relationship, but got %v", err)
+       }
+
+       association := DB.Model(person).Association("Addresses")
+       if count := association.Count(); count != 1 || association.Error != nil {
+               t.Errorf("Should found one address, but got %v, error is %v", count, association.Error)
+       }
+
+       if association.Clear(); association.Count() != 0 {
+               t.Errorf("Should deleted all addresses")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/logger.go b/vendor/github.com/jinzhu/gorm/logger.go
new file mode 100755 (executable)
index 0000000..4324a2e
--- /dev/null
@@ -0,0 +1,119 @@
+package gorm
+
+import (
+       "database/sql/driver"
+       "fmt"
+       "log"
+       "os"
+       "reflect"
+       "regexp"
+       "strconv"
+       "time"
+       "unicode"
+)
+
+var (
+       // defaultLogger writes to stdout, prefixing each entry with CR/LF and no log flags.
+       defaultLogger            = Logger{log.New(os.Stdout, "\r\n", 0)}
+       // sqlRegexp matches `?` placeholders; numericPlaceHolderRegexp matches $1-style placeholders.
+       sqlRegexp                = regexp.MustCompile(`\?`)
+       numericPlaceHolderRegexp = regexp.MustCompile(`\$\d+`)
+)
+
+// isPrintable reports whether every rune in s is printable
+// (per unicode.IsPrint); used to decide whether []byte SQL
+// arguments are shown verbatim or as '<binary>'.
+func isPrintable(s string) bool {
+       for _, r := range s {
+               if !unicode.IsPrint(r) {
+                       return false
+               }
+       }
+       return true
+}
+
+// LogFormatter renders log values into printable messages with ANSI colors.
+// For "sql" entries the expected layout (per the type assertions below) is:
+// values[0] level, values[1] source location, values[2] duration
+// (time.Duration), values[3] SQL string, values[4] bound variables
+// ([]interface{}), values[5] rows affected (int64). Any other level is
+// printed as-is in red.
+var LogFormatter = func(values ...interface{}) (messages []interface{}) {
+       if len(values) > 1 {
+               var (
+                       sql             string
+                       formattedValues []string
+                       level           = values[0]
+                       currentTime     = "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m"
+                       source          = fmt.Sprintf("\033[35m(%v)\033[0m", values[1])
+               )
+
+               messages = []interface{}{source, currentTime}
+
+               if level == "sql" {
+                       // duration
+                       messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0))
+                       // sql
+
+                       // Render each bound variable as a SQL-ish literal.
+                       for _, value := range values[4].([]interface{}) {
+                               indirectValue := reflect.Indirect(reflect.ValueOf(value))
+                               if indirectValue.IsValid() {
+                                       value = indirectValue.Interface()
+                                       if t, ok := value.(time.Time); ok {
+                                               formattedValues = append(formattedValues, fmt.Sprintf("'%v'", t.Format("2006-01-02 15:04:05")))
+                                       } else if b, ok := value.([]byte); ok {
+                                               if str := string(b); isPrintable(str) {
+                                                       formattedValues = append(formattedValues, fmt.Sprintf("'%v'", str))
+                                               } else {
+                                                       formattedValues = append(formattedValues, "'<binary>'")
+                                               }
+                                       } else if r, ok := value.(driver.Valuer); ok {
+                                               if value, err := r.Value(); err == nil && value != nil {
+                                                       formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
+                                               } else {
+                                                       formattedValues = append(formattedValues, "NULL")
+                                               }
+                                       } else {
+                                               formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
+                                       }
+                               } else {
+                                       // nil pointer argument.
+                                       formattedValues = append(formattedValues, "NULL")
+                               }
+                       }
+
+                       // differentiate between $n placeholders or else treat like ?
+                       if numericPlaceHolderRegexp.MatchString(values[3].(string)) {
+                               sql = values[3].(string)
+                               for index, value := range formattedValues {
+                                       // ([^\d]|$) guard stops $1 from also matching the prefix of $10.
+                                       placeholder := fmt.Sprintf(`\$%d([^\d]|$)`, index+1)
+                                       sql = regexp.MustCompile(placeholder).ReplaceAllString(sql, value+"$1")
+                               }
+                       } else {
+                               // Interleave formatted values back between the `?`-split SQL fragments.
+                               formattedValuesLength := len(formattedValues)
+                               for index, value := range sqlRegexp.Split(values[3].(string), -1) {
+                                       sql += value
+                                       if index < formattedValuesLength {
+                                               sql += formattedValues[index]
+                                       }
+                               }
+                       }
+
+                       messages = append(messages, sql)
+                       messages = append(messages, fmt.Sprintf(" \n\033[36;31m[%v]\033[0m ", strconv.FormatInt(values[5].(int64), 10)+" rows affected or returned "))
+               } else {
+                       messages = append(messages, "\033[31;1m")
+                       messages = append(messages, values[2:]...)
+                       messages = append(messages, "\033[0m")
+               }
+       }
+
+       return
+}
+
+// logger is the minimal interface gorm requires of a log sink.
+type logger interface {
+       Print(v ...interface{})
+}
+
+// LogWriter log writer interface
+type LogWriter interface {
+       Println(v ...interface{})
+}
+
+// Logger default logger
+type Logger struct {
+       LogWriter
+}
+
+// Print format & print log
+func (logger Logger) Print(values ...interface{}) {
+       logger.Println(LogFormatter(values...)...)
+}
diff --git a/vendor/github.com/jinzhu/gorm/main.go b/vendor/github.com/jinzhu/gorm/main.go
new file mode 100755 (executable)
index 0000000..17c75ed
--- /dev/null
@@ -0,0 +1,792 @@
+package gorm
+
+import (
+       "database/sql"
+       "errors"
+       "fmt"
+       "reflect"
+       "strings"
+       "sync"
+       "time"
+)
+
+// DB contains information for current db connection
+type DB struct {
+       Value        interface{}
+       Error        error
+       RowsAffected int64
+
+       // single db
+       db                SQLCommon
+       blockGlobalUpdate bool
+       logMode           int // set by LogMode: 2 = detailed logs, 1 = disabled; zero value = default (errors only)
+       logger            logger
+       search            *search
+       values            sync.Map
+
+       // global db (shared via parent; parent points at itself on the root DB)
+       parent        *DB
+       callbacks     *Callback
+       dialect       Dialect
+       singularTable bool
+}
+
+// Open initialize a new db connection, need to import driver first, e.g:
+//
+//     import _ "github.com/go-sql-driver/mysql"
+//     func main() {
+//       db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local")
+//     }
+// GORM has wrapped some drivers, for easier to remember driver's import path, so you could import the mysql driver with
+//    import _ "github.com/jinzhu/gorm/dialects/mysql"
+//    // import _ "github.com/jinzhu/gorm/dialects/postgres"
+//    // import _ "github.com/jinzhu/gorm/dialects/sqlite"
+//    // import _ "github.com/jinzhu/gorm/dialects/mssql"
+func Open(dialect string, args ...interface{}) (db *DB, err error) {
+       if len(args) == 0 {
+               err = errors.New("invalid database source")
+               return nil, err
+       }
+       var source string
+       var dbSQL SQLCommon
+       var ownDbSQL bool // true when we created the *sql.DB ourselves (so we may close it on ping failure)
+
+       switch value := args[0].(type) {
+       case string:
+               // Either Open(dialect, source) or Open(dialect, driver, source).
+               var driver = dialect
+               if len(args) == 1 {
+                       source = value
+               } else if len(args) >= 2 {
+                       driver = value
+                       source = args[1].(string)
+               }
+               dbSQL, err = sql.Open(driver, source)
+               ownDbSQL = true
+       case SQLCommon:
+               // Caller supplied an existing connection; we don't own its lifecycle.
+               dbSQL = value
+               ownDbSQL = false
+       default:
+               return nil, fmt.Errorf("invalid database source: %v is not a valid type", value)
+       }
+
+       db = &DB{
+               db:        dbSQL,
+               logger:    defaultLogger,
+               callbacks: DefaultCallback,
+               dialect:   newDialect(dialect, dbSQL),
+       }
+       db.parent = db
+       // sql.Open error is checked only after db is built; on error the
+       // non-nil db is returned alongside err.
+       if err != nil {
+               return
+       }
+       // Send a ping to make sure the database connection is alive.
+       if d, ok := dbSQL.(*sql.DB); ok {
+               if err = d.Ping(); err != nil && ownDbSQL {
+                       d.Close()
+               }
+       }
+       return
+}
+
+// New clone a new db connection without search conditions
+func (s *DB) New() *DB {
+       clone := s.clone()
+       clone.search = nil
+       clone.Value = nil
+       return clone
+}
+
+type closer interface {
+       Close() error
+}
+
+// Close close current db connection.  If database connection is not an io.Closer, returns an error.
+func (s *DB) Close() error {
+       if db, ok := s.parent.db.(closer); ok {
+               return db.Close()
+       }
+       return errors.New("can't close current db")
+}
+
+// DB get `*sql.DB` from current connection
+// If the underlying database connection is not a *sql.DB, returns nil
+func (s *DB) DB() *sql.DB {
+       db, _ := s.db.(*sql.DB)
+       return db
+}
+
+// CommonDB return the underlying `*sql.DB` or `*sql.Tx` instance, mainly intended to allow coexistence with legacy non-GORM code.
+func (s *DB) CommonDB() SQLCommon {
+       return s.db
+}
+
+// Dialect get dialect
+func (s *DB) Dialect() Dialect {
+       return s.dialect
+}
+
+// Callback return `Callbacks` container, you could add/change/delete callbacks with it
+//     db.Callback().Create().Register("update_created_at", updateCreated)
+// Refer https://jinzhu.github.io/gorm/development.html#callbacks
+func (s *DB) Callback() *Callback {
+       // Clone the parent's callbacks so registrations don't mutate a shared set.
+       s.parent.callbacks = s.parent.callbacks.clone()
+       return s.parent.callbacks
+}
+
+// SetLogger replace default logger
+func (s *DB) SetLogger(log logger) {
+       s.logger = log
+}
+
+// LogMode set log mode, `true` for detailed logs, `false` for no log, default, will only print error logs
+func (s *DB) LogMode(enable bool) *DB {
+       // 2 = detailed logging, 1 = logging disabled (0, the zero value, means errors only).
+       if enable {
+               s.logMode = 2
+       } else {
+               s.logMode = 1
+       }
+       return s
+}
+
+// BlockGlobalUpdate if true, generates an error on update/delete without where clause.
+// This is to prevent eventual error with empty objects updates/deletions
+func (s *DB) BlockGlobalUpdate(enable bool) *DB {
+       s.blockGlobalUpdate = enable
+       return s
+}
+
+// HasBlockGlobalUpdate return state of block
+func (s *DB) HasBlockGlobalUpdate() bool {
+       return s.blockGlobalUpdate
+}
+
+// SingularTable use singular table by default
+func (s *DB) SingularTable(enable bool) {
+       // Reset the cached model structs since table names change with this setting.
+       modelStructsMap = sync.Map{}
+       s.parent.singularTable = enable
+}
+
+// NewScope create a scope for current operation
+func (s *DB) NewScope(value interface{}) *Scope {
+       dbClone := s.clone()
+       dbClone.Value = value
+       return &Scope{db: dbClone, Search: dbClone.search.clone(), Value: value}
+}
+
+// QueryExpr returns the query as expr object
+func (s *DB) QueryExpr() *expr {
+       scope := s.NewScope(s.Value)
+       scope.InstanceSet("skip_bindvar", true)
+       scope.prepareQuerySQL()
+
+       return Expr(scope.SQL, scope.SQLVars...)
+}
+
+// SubQuery returns the query as sub query
+func (s *DB) SubQuery() *expr {
+       scope := s.NewScope(s.Value)
+       scope.InstanceSet("skip_bindvar", true)
+       scope.prepareQuerySQL()
+
+       return Expr(fmt.Sprintf("(%v)", scope.SQL), scope.SQLVars...)
+}
+
+// Chainable query builders: each clones the receiver before recording the
+// condition, so partially-built queries never leak into the original DB.
+
+// Where return a new relation, filter records with given conditions, accepts `map`, `struct` or `string` as conditions, refer http://jinzhu.github.io/gorm/crud.html#query
+func (s *DB) Where(query interface{}, args ...interface{}) *DB {
+       return s.clone().search.Where(query, args...).db
+}
+
+// Or filter records that match before conditions or this one, similar to `Where`
+func (s *DB) Or(query interface{}, args ...interface{}) *DB {
+       return s.clone().search.Or(query, args...).db
+}
+
+// Not filter records that don't match current conditions, similar to `Where`
+func (s *DB) Not(query interface{}, args ...interface{}) *DB {
+       return s.clone().search.Not(query, args...).db
+}
+
+// Limit specify the number of records to be retrieved
+func (s *DB) Limit(limit interface{}) *DB {
+       return s.clone().search.Limit(limit).db
+}
+
+// Offset specify the number of records to skip before starting to return the records
+func (s *DB) Offset(offset interface{}) *DB {
+       return s.clone().search.Offset(offset).db
+}
+
+// Order specify order when retrieve records from database, set reorder to `true` to overwrite defined conditions
+//     db.Order("name DESC")
+//     db.Order("name DESC", true) // reorder
+//     db.Order(gorm.Expr("name = ? DESC", "first")) // sql expression
+func (s *DB) Order(value interface{}, reorder ...bool) *DB {
+       return s.clone().search.Order(value, reorder...).db
+}
+
+// Select specify fields that you want to retrieve from database when querying, by default, will select all fields;
+// When creating/updating, specify fields that you want to save to database
+func (s *DB) Select(query interface{}, args ...interface{}) *DB {
+       return s.clone().search.Select(query, args...).db
+}
+
+// Omit specify fields that you want to ignore when saving to database for creating, updating
+func (s *DB) Omit(columns ...string) *DB {
+       return s.clone().search.Omit(columns...).db
+}
+
+// Group specify the group method on the find
+func (s *DB) Group(query string) *DB {
+       return s.clone().search.Group(query).db
+}
+
+// Having specify HAVING conditions for GROUP BY
+func (s *DB) Having(query interface{}, values ...interface{}) *DB {
+       return s.clone().search.Having(query, values...).db
+}
+
+// Joins specify Joins conditions
+//     db.Joins("JOIN emails ON emails.user_id = users.id AND emails.email = ?", "jinzhu@example.org").Find(&user)
+func (s *DB) Joins(query string, args ...interface{}) *DB {
+       return s.clone().search.Joins(query, args...).db
+}
+
+// Scopes pass current database connection to arguments `func(*DB) *DB`, which could be used to add conditions dynamically
+//     func AmountGreaterThan1000(db *gorm.DB) *gorm.DB {
+//         return db.Where("amount > ?", 1000)
+//     }
+//
+//     func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB {
+//         return func (db *gorm.DB) *gorm.DB {
+//             return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status)
+//         }
+//     }
+//
+//     db.Scopes(AmountGreaterThan1000, OrderStatus([]string{"paid", "shipped"})).Find(&orders)
+// Refer https://jinzhu.github.io/gorm/crud.html#scopes
+func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB {
+       for _, f := range funcs {
+               s = f(s)
+       }
+       return s
+}
+
+// Unscoped return all record including deleted record, refer Soft Delete https://jinzhu.github.io/gorm/crud.html#soft-delete
+func (s *DB) Unscoped() *DB {
+       return s.clone().search.unscoped().db
+}
+
+// Attrs initialize struct with argument if record not found with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
+func (s *DB) Attrs(attrs ...interface{}) *DB {
+       return s.clone().search.Attrs(attrs...).db
+}
+
+// Assign assign result with argument regardless it is found or not with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
+func (s *DB) Assign(attrs ...interface{}) *DB {
+       return s.clone().search.Assign(attrs...).db
+}
+
+// First find first record that match given conditions, order by primary key
+func (s *DB) First(out interface{}, where ...interface{}) *DB {
+       newScope := s.NewScope(out)
+       newScope.Search.Limit(1)
+       return newScope.Set("gorm:order_by_primary_key", "ASC").
+               inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Take return a record that match given conditions, the order will depend on the database implementation
+func (s *DB) Take(out interface{}, where ...interface{}) *DB {
+       newScope := s.NewScope(out)
+       newScope.Search.Limit(1)
+       return newScope.inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Last find last record that match given conditions, order by primary key
+func (s *DB) Last(out interface{}, where ...interface{}) *DB {
+       newScope := s.NewScope(out)
+       newScope.Search.Limit(1)
+       return newScope.Set("gorm:order_by_primary_key", "DESC").
+               inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Find find records that match given conditions
+func (s *DB) Find(out interface{}, where ...interface{}) *DB {
+       return s.NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+//Preloads preloads relations, don`t touch out
+func (s *DB) Preloads(out interface{}) *DB {
+       return s.NewScope(out).InstanceSet("gorm:only_preload", 1).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Scan scan value to a struct
+func (s *DB) Scan(dest interface{}) *DB {
+       return s.NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Row return `*sql.Row` with given conditions
+func (s *DB) Row() *sql.Row {
+       return s.NewScope(s.Value).row()
+}
+
+// Rows return `*sql.Rows` with given conditions
+func (s *DB) Rows() (*sql.Rows, error) {
+       return s.NewScope(s.Value).rows()
+}
+
+// ScanRows scan `*sql.Rows` to give struct
+func (s *DB) ScanRows(rows *sql.Rows, result interface{}) error {
+       var (
+               scope        = s.NewScope(result)
+               clone        = scope.db
+               columns, err = rows.Columns()
+       )
+
+       // Only scan when reading the column list succeeded.
+       if clone.AddError(err) == nil {
+               scope.scan(rows, columns, scope.Fields())
+       }
+
+       return clone.Error
+}
+
+// Pluck used to query single column from a model as a map
+//     var ages []int64
+//     db.Find(&users).Pluck("age", &ages)
+func (s *DB) Pluck(column string, value interface{}) *DB {
+       return s.NewScope(s.Value).pluck(column, value).db
+}
+
+// Count get how many records for a model
+func (s *DB) Count(value interface{}) *DB {
+       return s.NewScope(s.Value).count(value).db
+}
+
+// Related get related associations
+func (s *DB) Related(value interface{}, foreignKeys ...string) *DB {
+       return s.NewScope(s.Value).related(value, foreignKeys...).db
+}
+
+// FirstOrInit find first matched record or initialize a new one with given conditions (only works with struct, map conditions)
+// https://jinzhu.github.io/gorm/crud.html#firstorinit
+func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB {
+       c := s.clone()
+       if result := c.First(out, where...); result.Error != nil {
+               if !result.RecordNotFound() {
+                       return result
+               }
+               c.NewScope(out).inlineCondition(where...).initialize()
+       } else {
+               c.NewScope(out).updatedAttrsWithValues(c.search.assignAttrs)
+       }
+       return c
+}
+
+// FirstOrCreate find first matched record or create a new one with given conditions (only works with struct, map conditions)
+// https://jinzhu.github.io/gorm/crud.html#firstorcreate
+func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB {
+       c := s.clone()
+       // NOTE(review): queries via s.First (the receiver) while the sibling
+       // FirstOrInit uses the clone c.First — confirm the asymmetry is intended.
+       if result := s.First(out, where...); result.Error != nil {
+               if !result.RecordNotFound() {
+                       return result
+               }
+               return c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(c.parent.callbacks.creates).db
+       } else if len(c.search.assignAttrs) > 0 {
+               return c.NewScope(out).InstanceSet("gorm:update_interface", c.search.assignAttrs).callCallbacks(c.parent.callbacks.updates).db
+       }
+       return c
+}
+
+// Update update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
+func (s *DB) Update(attrs ...interface{}) *DB {
+       return s.Updates(toSearchableMap(attrs...), true)
+}
+
+// Updates update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
+func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB {
+       return s.NewScope(s.Value).
+               Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0).
+               InstanceSet("gorm:update_interface", values).
+               callCallbacks(s.parent.callbacks.updates).db
+}
+
+// UpdateColumn update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
+func (s *DB) UpdateColumn(attrs ...interface{}) *DB {
+       return s.UpdateColumns(toSearchableMap(attrs...))
+}
+
+// UpdateColumns update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
+func (s *DB) UpdateColumns(values interface{}) *DB {
+       return s.NewScope(s.Value).
+               Set("gorm:update_column", true).
+               Set("gorm:save_associations", false).
+               InstanceSet("gorm:update_interface", values).
+               callCallbacks(s.parent.callbacks.updates).db
+}
+
+// Save update value in database, if the value doesn't have primary key, will insert it
+func (s *DB) Save(value interface{}) *DB {
+       scope := s.NewScope(value)
+       if !scope.PrimaryKeyZero() {
+               newDB := scope.callCallbacks(s.parent.callbacks.updates).db
+               // Update touched no rows: fall back to create-if-missing.
+               if newDB.Error == nil && newDB.RowsAffected == 0 {
+                       return s.New().FirstOrCreate(value)
+               }
+               return newDB
+       }
+       return scope.callCallbacks(s.parent.callbacks.creates).db
+}
+
+// Create insert the value into database
+func (s *DB) Create(value interface{}) *DB {
+       scope := s.NewScope(value)
+       return scope.callCallbacks(s.parent.callbacks.creates).db
+}
+
+// Delete delete value match given conditions, if the value has primary key, then will including the primary key as condition
+func (s *DB) Delete(value interface{}, where ...interface{}) *DB {
+       return s.NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callbacks.deletes).db
+}
+
+// Raw use raw sql as conditions, won't run it unless invoked by other methods
+//    db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result)
+func (s *DB) Raw(sql string, values ...interface{}) *DB {
+       return s.clone().search.Raw(true).Where(sql, values...).db
+}
+
+// Exec execute raw sql
+func (s *DB) Exec(sql string, values ...interface{}) *DB {
+       scope := s.NewScope(nil)
+       generatedSQL := scope.buildCondition(map[string]interface{}{"query": sql, "args": values}, true)
+       // buildCondition wraps the SQL in parentheses; strip them for a bare statement.
+       generatedSQL = strings.TrimSuffix(strings.TrimPrefix(generatedSQL, "("), ")")
+       scope.Raw(generatedSQL)
+       return scope.Exec().db
+}
+
+// Model specify the model you would like to run db operations
+//    // update all users's name to `hello`
+//    db.Model(&User{}).Update("name", "hello")
+//    // if user's primary key is non-blank, will use it as condition, then will only update the user's name to `hello`
+//    db.Model(&user).Update("name", "hello")
+func (s *DB) Model(value interface{}) *DB {
+       c := s.clone()
+       c.Value = value
+       return c
+}
+
+// Table specify the table you would like to run db operations
+func (s *DB) Table(name string) *DB {
+       clone := s.clone()
+       clone.search.Table(name)
+       clone.Value = nil
+       return clone
+}
+
+// Debug start debug mode
+func (s *DB) Debug() *DB {
+       return s.clone().LogMode(true)
+}
+
+// Begin begin a transaction
+func (s *DB) Begin() *DB {
+       c := s.clone()
+       if db, ok := c.db.(sqlDb); ok && db != nil {
+               tx, err := db.Begin()
+               // Swap the clone's connection for the transaction handle.
+               c.db = interface{}(tx).(SQLCommon)
+
+               c.dialect.SetDB(c.db)
+               c.AddError(err)
+       } else {
+               c.AddError(ErrCantStartTransaction)
+       }
+       return c
+}
+
+// Commit commit a transaction
+func (s *DB) Commit() *DB {
+       // Guard against a nil *sql.Tx stored inside the interface.
+       var emptySQLTx *sql.Tx
+       if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
+               s.AddError(db.Commit())
+       } else {
+               s.AddError(ErrInvalidTransaction)
+       }
+       return s
+}
+
+// Rollback rollback a transaction
+func (s *DB) Rollback() *DB {
+       // Guard against a nil *sql.Tx stored inside the interface.
+       var emptySQLTx *sql.Tx
+       if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
+               s.AddError(db.Rollback())
+       } else {
+               s.AddError(ErrInvalidTransaction)
+       }
+       return s
+}
+
+// NewRecord check if value's primary key is blank
+func (s *DB) NewRecord(value interface{}) bool {
+       return s.NewScope(value).PrimaryKeyZero()
+}
+
+// RecordNotFound check if returning ErrRecordNotFound error
+func (s *DB) RecordNotFound() bool {
+       for _, err := range s.GetErrors() {
+               if err == ErrRecordNotFound {
+                       return true
+               }
+       }
+       return false
+}
+
+// CreateTable create table for models
+func (s *DB) CreateTable(models ...interface{}) *DB {
+       db := s.Unscoped()
+       for _, model := range models {
+               db = db.NewScope(model).createTable().db
+       }
+       return db
+}
+
+// DropTable drop table for models
+func (s *DB) DropTable(values ...interface{}) *DB {
+       db := s.clone()
+       for _, value := range values {
+               // A plain string is treated as a table name rather than a model.
+               if tableName, ok := value.(string); ok {
+                       db = db.Table(tableName)
+               }
+
+               db = db.NewScope(value).dropTable().db
+       }
+       return db
+}
+
+// DropTableIfExists drop table if it is exist
+func (s *DB) DropTableIfExists(values ...interface{}) *DB {
+       db := s.clone()
+       for _, value := range values {
+               if s.HasTable(value) {
+                       db.AddError(s.DropTable(value).Error)
+               }
+       }
+       return db
+}
+
+// HasTable check has table or not
+func (s *DB) HasTable(value interface{}) bool {
+       var (
+               scope     = s.NewScope(value)
+               tableName string
+       )
+
+       // Accept either an explicit table name string or a model value.
+       if name, ok := value.(string); ok {
+               tableName = name
+       } else {
+               tableName = scope.TableName()
+       }
+
+       has := scope.Dialect().HasTable(tableName)
+       s.AddError(scope.db.Error)
+       return has
+}
+
+// AutoMigrate run auto migration for given models, will only add missing fields, won't delete/change current data
+func (s *DB) AutoMigrate(values ...interface{}) *DB {
+       db := s.Unscoped()
+       for _, value := range values {
+               db = db.NewScope(value).autoMigrate().db
+       }
+       return db
+}
+
+// ModifyColumn modify column to type
+func (s *DB) ModifyColumn(column string, typ string) *DB {
+       scope := s.NewScope(s.Value)
+       scope.modifyColumn(column, typ)
+       return scope.db
+}
+
+// DropColumn drop a column
+func (s *DB) DropColumn(column string) *DB {
+       scope := s.NewScope(s.Value)
+       scope.dropColumn(column)
+       return scope.db
+}
+
+// AddIndex add index for columns with given name
+func (s *DB) AddIndex(indexName string, columns ...string) *DB {
+       scope := s.Unscoped().NewScope(s.Value)
+       scope.addIndex(false, indexName, columns...)
+       return scope.db
+}
+
+// AddUniqueIndex add unique index for columns with given name
+func (s *DB) AddUniqueIndex(indexName string, columns ...string) *DB {
+       scope := s.Unscoped().NewScope(s.Value)
+       scope.addIndex(true, indexName, columns...)
+       return scope.db
+}
+
+// RemoveIndex remove index with name
+func (s *DB) RemoveIndex(indexName string) *DB {
+       scope := s.NewScope(s.Value)
+       scope.removeIndex(indexName)
+       return scope.db
+}
+
+// AddForeignKey Add foreign key to the given scope, e.g:
+//     db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT")
+func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB {
+       scope := s.NewScope(s.Value)
+       scope.addForeignKey(field, dest, onDelete, onUpdate)
+       return scope.db
+}
+
+// RemoveForeignKey Remove foreign key from the given scope, e.g:
+//     db.Model(&User{}).RemoveForeignKey("city_id", "cities(id)")
+func (s *DB) RemoveForeignKey(field string, dest string) *DB {
+       scope := s.clone().NewScope(s.Value)
+       scope.removeForeignKey(field, dest)
+       return scope.db
+}
+
+// Association start `Association Mode` to handler relations things easir in that mode, refer: https://jinzhu.github.io/gorm/associations.html#association-mode
+func (s *DB) Association(column string) *Association {
+       var err error
+       var scope = s.Set("gorm:association:source", s.Value).NewScope(s.Value)
+
+       // The owner record must have a non-blank primary key before its
+       // associations can be worked with.
+       if primaryField := scope.PrimaryField(); primaryField.IsBlank {
+               err = errors.New("primary key can't be nil")
+       } else {
+               if field, ok := scope.FieldByName(column); ok {
+                       // The named field must be a real relationship with foreign keys.
+                       if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 {
+                               err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type())
+                       } else {
+                               return &Association{scope: scope, column: column, field: field}
+                       }
+               } else {
+                       err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column)
+               }
+       }
+
+       // Every failure path funnels into an Association carrying only the error.
+       return &Association{Error: err}
+}
+
+// Preload preload associations with given conditions
+//    db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users)
+func (s *DB) Preload(column string, conditions ...interface{}) *DB {
+       return s.clone().search.Preload(column, conditions...).db
+}
+
+// Set set setting by name, which could be used in callbacks, will clone a new db, and update its setting
+func (s *DB) Set(name string, value interface{}) *DB {
+       return s.clone().InstantSet(name, value)
+}
+
+// InstantSet instant set setting, will affect current db
+// (no clone is made, unlike Set).
+func (s *DB) InstantSet(name string, value interface{}) *DB {
+       s.values.Store(name, value)
+       return s
+}
+
+// Get get setting by name
+func (s *DB) Get(name string) (value interface{}, ok bool) {
+       value, ok = s.values.Load(name)
+       return
+}
+
+// SetJoinTableHandler set a model's join table handler for a relation
+func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) {
+       scope := s.NewScope(source)
+       // Locate the struct field matching column by Go name or DB name.
+       for _, field := range scope.GetModelStruct().StructFields {
+               if field.Name == column || field.DBName == column {
+                       // Only many2many relationships have join tables.
+                       if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
+                               source := (&Scope{Value: source}).GetModelStruct().ModelType
+                               destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType
+                               handler.Setup(field.Relationship, many2many, source, destination)
+                               field.Relationship.JoinTableHandler = handler
+                               // If the join table already exists, keep its schema in sync.
+                               if table := handler.Table(s); scope.Dialect().HasTable(table) {
+                                       s.Table(table).AutoMigrate(handler)
+                               }
+                       }
+               }
+       }
+}
+
+// AddError add error to the db
+func (s *DB) AddError(err error) error {
+       if err != nil {
+               // ErrRecordNotFound is expected control flow: it is stored on
+               // s.Error but neither logged nor accumulated.
+               if err != ErrRecordNotFound {
+                       if s.logMode == 0 {
+                               // Default log mode: print asynchronously with source location.
+                               go s.print(fileWithLineNum(), err)
+                       } else {
+                               s.log(err)
+                       }
+
+                       // Fold the new error into any previously recorded ones;
+                       // a single error stays unwrapped.
+                       errors := Errors(s.GetErrors())
+                       errors = errors.Add(err)
+                       if len(errors) > 1 {
+                               err = errors
+                       }
+               }
+
+               s.Error = err
+       }
+       return err
+}
+
+// GetErrors get happened errors from the db
+func (s *DB) GetErrors() []error {
+       switch e := s.Error.(type) {
+       case Errors:
+               // Already an accumulated error list.
+               return e
+       case nil:
+               return []error{}
+       default:
+               // A single error is wrapped into a one-element slice.
+               return []error{e}
+       }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Private Methods For DB
+////////////////////////////////////////////////////////////////////////////////
+
+// clone returns a shallow working copy of s: connection, parent, logger,
+// value, error and settings are carried over; the search is deep-copied so
+// the clone can accumulate conditions independently.
+func (s *DB) clone() *DB {
+       db := &DB{
+               db:                s.db,
+               parent:            s.parent,
+               logger:            s.logger,
+               logMode:           s.logMode,
+               Value:             s.Value,
+               Error:             s.Error,
+               blockGlobalUpdate: s.blockGlobalUpdate,
+               dialect:           newDialect(s.dialect.GetName(), s.db),
+       }
+
+       // Copy every stored setting into the clone's own sync.Map.
+       s.values.Range(func(k, v interface{}) bool {
+               db.values.Store(k, v)
+               return true
+       })
+
+       if s.search == nil {
+               // -1 means "no limit/offset set".
+               db.search = &search{limit: -1, offset: -1}
+       } else {
+               db.search = s.search.clone()
+       }
+
+       // Point the search back at its owning DB.
+       db.search.db = db
+       return db
+}
+
+// print forwards its arguments unmodified to the configured logger.
+func (s *DB) print(v ...interface{}) {
+       s.logger.Print(v...)
+}
+
+func (s *DB) log(v ...interface{}) {
+       if s != nil && s.logMode == 2 {
+               s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...)
+       }
+}
+
+func (s *DB) slog(sql string, t time.Time, vars ...interface{}) {
+       if s.logMode == 2 {
+               s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars, s.RowsAffected)
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/main_test.go b/vendor/github.com/jinzhu/gorm/main_test.go
new file mode 100755 (executable)
index 0000000..94d2fa3
--- /dev/null
@@ -0,0 +1,1108 @@
+package gorm_test
+
+import (
+       "database/sql"
+       "database/sql/driver"
+       "fmt"
+       "os"
+       "path/filepath"
+       "reflect"
+       "strconv"
+       "strings"
+       "testing"
+       "time"
+
+       "github.com/erikstmartin/go-testdb"
+       "github.com/jinzhu/gorm"
+       _ "github.com/jinzhu/gorm/dialects/mssql"
+       _ "github.com/jinzhu/gorm/dialects/mysql"
+       "github.com/jinzhu/gorm/dialects/postgres"
+       _ "github.com/jinzhu/gorm/dialects/sqlite"
+       "github.com/jinzhu/now"
+)
+
+var (
+       // DB is the shared connection every test in this package runs against;
+       // it is opened once in init below.
+       DB                 *gorm.DB
+       // Scratch timestamps reused across tests.
+       t1, t2, t3, t4, t5 time.Time
+)
+
+// init opens the package-wide test connection (dialect chosen via the
+// GORM_DIALECT environment variable) and runs the schema migration; any
+// failure aborts the whole test binary.
+func init() {
+       var err error
+
+       if DB, err = OpenTestConnection(); err != nil {
+               panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err))
+       }
+
+       runMigration()
+}
+
+func OpenTestConnection() (db *gorm.DB, err error) {
+       dbDSN := os.Getenv("GORM_DSN")
+       switch os.Getenv("GORM_DIALECT") {
+       case "mysql":
+               fmt.Println("testing mysql...")
+               if dbDSN == "" {
+                       dbDSN = "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True"
+               }
+               db, err = gorm.Open("mysql", dbDSN)
+       case "postgres":
+               fmt.Println("testing postgres...")
+               if dbDSN == "" {
+                       dbDSN = "user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable"
+               }
+               db, err = gorm.Open("postgres", dbDSN)
+       case "mssql":
+               // CREATE LOGIN gorm WITH PASSWORD = 'LoremIpsum86';
+               // CREATE DATABASE gorm;
+               // USE gorm;
+               // CREATE USER gorm FROM LOGIN gorm;
+               // sp_changedbowner 'gorm';
+               fmt.Println("testing mssql...")
+               if dbDSN == "" {
+                       dbDSN = "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm"
+               }
+               db, err = gorm.Open("mssql", dbDSN)
+       default:
+               fmt.Println("testing sqlite3...")
+               db, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db"))
+       }
+
+       // db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
+       // db.SetLogger(log.New(os.Stdout, "\r\n", 0))
+       if debug := os.Getenv("DEBUG"); debug == "true" {
+               db.LogMode(true)
+       } else if debug == "false" {
+               db.LogMode(false)
+       }
+
+       db.DB().SetMaxIdleConns(10)
+
+       return
+}
+
+func TestOpen_ReturnsError_WithBadArgs(t *testing.T) {
+       stringRef := "foo"
+       testCases := []interface{}{42, time.Now(), &stringRef}
+       for _, tc := range testCases {
+               t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) {
+                       _, err := gorm.Open("postgresql", tc)
+                       if err == nil {
+                               t.Error("Should got error with invalid database source")
+                       }
+                       if !strings.HasPrefix(err.Error(), "invalid database source:") {
+                               t.Errorf("Should got error starting with \"invalid database source:\", but got %q", err.Error())
+                       }
+               })
+       }
+}
+
+// TestStringPrimaryKey verifies that a string primary key survives both the
+// initial create and a subsequent update without being overwritten.
+func TestStringPrimaryKey(t *testing.T) {
+       type UUIDStruct struct {
+               ID   string `gorm:"primary_key"`
+               Name string
+       }
+       DB.DropTable(&UUIDStruct{})
+       DB.AutoMigrate(&UUIDStruct{})
+
+       // First iteration creates the row, second updates it in place.
+       for _, name := range []string{"hello", "hello world"} {
+               record := UUIDStruct{ID: "uuid", Name: name}
+               if err := DB.Save(&record).Error; err != nil || record.ID != "uuid" || record.Name != name {
+                       t.Errorf("string primary key should not be populated")
+               }
+       }
+}
+
+// TestExceptionsWithInvalidSql checks that malformed SQL surfaces as an
+// error on Pluck/Find/First and that such statements never mutate data.
+func TestExceptionsWithInvalidSql(t *testing.T) {
+       var columns []string
+       if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
+               t.Errorf("Should got error with invalid SQL")
+       }
+
+       if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
+               t.Errorf("Should got error with invalid SQL")
+       }
+
+       if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil {
+               t.Errorf("Should got error with invalid SQL")
+       }
+
+       var count1, count2 int64
+       DB.Model(&User{}).Count(&count1)
+       if count1 <= 0 {
+               t.Errorf("Should find some users")
+       }
+
+       // An injection-looking value must be bound as a parameter, not executed.
+       if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil {
+               t.Errorf("Should got error with invalid SQL")
+       }
+
+       // Row count must be unchanged by the statements above.
+       DB.Model(&User{}).Count(&count2)
+       if count1 != count2 {
+               t.Errorf("No user should not be deleted by invalid SQL")
+       }
+}
+
+// TestSetTable exercises Table(): querying, creating and saving against an
+// explicitly named table, and resetting the override with Table("").
+func TestSetTable(t *testing.T) {
+       DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
+       DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
+       DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
+
+       if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil {
+               t.Error("No errors should happen if set table for pluck", err)
+       }
+
+       // NOTE(review): this Find scans into a throwaway slice, not users —
+       // users is only populated by the invalid_table query below.
+       var users []User
+       if DB.Table("users").Find(&[]User{}).Error != nil {
+               t.Errorf("No errors should happen if set table for find")
+       }
+
+       if DB.Table("invalid_table").Find(&users).Error == nil {
+               t.Errorf("Should got error when table is set to an invalid table")
+       }
+
+       DB.Exec("drop table deleted_users;")
+       if DB.Table("deleted_users").CreateTable(&User{}).Error != nil {
+               t.Errorf("Create table with specified table")
+       }
+
+       DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
+
+       var deletedUsers []User
+       DB.Table("deleted_users").Find(&deletedUsers)
+       if len(deletedUsers) != 1 {
+               t.Errorf("Query from specified table")
+       }
+
+       // Table("") must clear the override and fall back to the model's table.
+       DB.Save(getPreparedUser("normal_user", "reset_table"))
+       DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
+       var user1, user2, user3 User
+       DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
+       if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
+               t.Errorf("unset specified table with blank string")
+       }
+}
+
+// Order is a fixture model with no TableName override; its table name is
+// derived by convention (see TestTableName).
+type Order struct {
+}
+
+// Cart is a fixture model whose table name is overridden below.
+type Cart struct {
+}
+
+// TableName overrides the conventional table name for Cart.
+func (c Cart) TableName() string {
+       return "shopping_cart"
+}
+
+// TestHasTable verifies HasTable for both a table-name string and a model
+// value, before and after the table is created.
+func TestHasTable(t *testing.T) {
+       type Foo struct {
+               Id    int
+               Stuff string
+       }
+       DB.DropTable(&Foo{})
+
+       // The table was just dropped, so HasTable must report false.
+       if DB.HasTable("foos") {
+               t.Errorf("Table should not exist, but does")
+       }
+       if DB.HasTable(&Foo{}) {
+               t.Errorf("Table should not exist, but does")
+       }
+
+       if err := DB.CreateTable(&Foo{}).Error; err != nil {
+               t.Errorf("Table should be created")
+       }
+
+       // After creation both lookups must report true.
+       if !DB.HasTable("foos") {
+               t.Errorf("Table should exist, but HasTable informs it does not")
+       }
+       if !DB.HasTable(&Foo{}) {
+               t.Errorf("Table should exist, but HasTable informs it does not")
+       }
+}
+
+// TestTableName checks derived table names for value, pointer and slice
+// forms, in plural (default) and singular modes, and that an explicit
+// TableName override (Cart) always wins.
+func TestTableName(t *testing.T) {
+       DB := DB.Model("")
+       if DB.NewScope(Order{}).TableName() != "orders" {
+               t.Errorf("Order's table name should be orders")
+       }
+
+       if DB.NewScope(&Order{}).TableName() != "orders" {
+               t.Errorf("&Order's table name should be orders")
+       }
+
+       if DB.NewScope([]Order{}).TableName() != "orders" {
+               t.Errorf("[]Order's table name should be orders")
+       }
+
+       if DB.NewScope(&[]Order{}).TableName() != "orders" {
+               t.Errorf("&[]Order's table name should be orders")
+       }
+
+       // Singular mode: the same forms now map to "order".
+       DB.SingularTable(true)
+       if DB.NewScope(Order{}).TableName() != "order" {
+               t.Errorf("Order's singular table name should be order")
+       }
+
+       if DB.NewScope(&Order{}).TableName() != "order" {
+               t.Errorf("&Order's singular table name should be order")
+       }
+
+       if DB.NewScope([]Order{}).TableName() != "order" {
+               t.Errorf("[]Order's singular table name should be order")
+       }
+
+       if DB.NewScope(&[]Order{}).TableName() != "order" {
+               t.Errorf("&[]Order's singular table name should be order")
+       }
+
+       // Cart.TableName() overrides convention regardless of form or mode.
+       if DB.NewScope(&Cart{}).TableName() != "shopping_cart" {
+               t.Errorf("&Cart's singular table name should be shopping_cart")
+       }
+
+       if DB.NewScope(Cart{}).TableName() != "shopping_cart" {
+               t.Errorf("Cart's singular table name should be shopping_cart")
+       }
+
+       if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" {
+               t.Errorf("&[]Cart's singular table name should be shopping_cart")
+       }
+
+       if DB.NewScope([]Cart{}).TableName() != "shopping_cart" {
+               t.Errorf("[]Cart's singular table name should be shopping_cart")
+       }
+       // Restore plural mode for the rest of the suite.
+       DB.SingularTable(false)
+}
+
+// TestNullValues round-trips sql.Null* fields: valid values are stored and
+// fetched, invalid ones come back as NULL, and a NULL in a NOT NULL column
+// (Name) is rejected.
+func TestNullValues(t *testing.T) {
+       DB.DropTable(&NullValue{})
+       DB.AutoMigrate(&NullValue{})
+
+       // All fields valid: every value must round-trip.
+       if err := DB.Save(&NullValue{
+               Name:    sql.NullString{String: "hello", Valid: true},
+               Gender:  &sql.NullString{String: "M", Valid: true},
+               Age:     sql.NullInt64{Int64: 18, Valid: true},
+               Male:    sql.NullBool{Bool: true, Valid: true},
+               Height:  sql.NullFloat64{Float64: 100.11, Valid: true},
+               AddedAt: NullTime{Time: time.Now(), Valid: true},
+       }).Error; err != nil {
+               t.Errorf("Not error should raise when test null value")
+       }
+
+       var nv NullValue
+       DB.First(&nv, "name = ?", "hello")
+
+       if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true {
+               t.Errorf("Should be able to fetch null value")
+       }
+
+       // Invalid Age/AddedAt: stored as NULL, read back as zero/invalid.
+       if err := DB.Save(&NullValue{
+               Name:    sql.NullString{String: "hello-2", Valid: true},
+               Gender:  &sql.NullString{String: "F", Valid: true},
+               Age:     sql.NullInt64{Int64: 18, Valid: false},
+               Male:    sql.NullBool{Bool: true, Valid: true},
+               Height:  sql.NullFloat64{Float64: 100.11, Valid: true},
+               AddedAt: NullTime{Time: time.Now(), Valid: false},
+       }).Error; err != nil {
+               t.Errorf("Not error should raise when test null value")
+       }
+
+       var nv2 NullValue
+       DB.First(&nv2, "name = ?", "hello-2")
+       if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false {
+               t.Errorf("Should be able to fetch null value")
+       }
+
+       // Invalid Name: presumably a NOT NULL column, so the save must fail.
+       if err := DB.Save(&NullValue{
+               Name:    sql.NullString{String: "hello-3", Valid: false},
+               Gender:  &sql.NullString{String: "M", Valid: true},
+               Age:     sql.NullInt64{Int64: 18, Valid: false},
+               Male:    sql.NullBool{Bool: true, Valid: true},
+               Height:  sql.NullFloat64{Float64: 100.11, Valid: true},
+               AddedAt: NullTime{Time: time.Now(), Valid: false},
+       }).Error; err == nil {
+               t.Errorf("Can't save because of name can't be null")
+       }
+}
+
+// TestNullValuesWithFirstOrCreate checks FirstOrCreate with sql.Null*
+// conditions: the first call creates the record, and Assign on the second
+// call updates it.
+func TestNullValuesWithFirstOrCreate(t *testing.T) {
+       var nv1 = NullValue{
+               Name:   sql.NullString{String: "first_or_create", Valid: true},
+               Gender: &sql.NullString{String: "M", Valid: true},
+       }
+
+       var nv2 NullValue
+       result := DB.Where(nv1).FirstOrCreate(&nv2)
+
+       // No matching row exists yet, so one must be created.
+       if result.RowsAffected != 1 {
+               t.Errorf("RowsAffected should be 1 after create some record")
+       }
+
+       if result.Error != nil {
+               t.Errorf("Should not raise any error, but got %v", result.Error)
+       }
+
+       if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" {
+               t.Errorf("first or create with nullvalues")
+       }
+
+       // Second call finds the record and applies the Assign update.
+       if err := DB.Where(nv1).Assign(NullValue{Age: sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil {
+               t.Errorf("Should not raise any error, but got %v", err)
+       }
+
+       if nv2.Age.Int64 != 18 {
+               t.Errorf("should update age to 18")
+       }
+}
+
+// TestTransaction checks Begin/Rollback/Commit: a rolled-back save becomes
+// invisible, a committed one stays visible, and CommonDB exposes the
+// underlying *sql.Tx. (The literal "transcation" is the fixture's stored
+// name — deliberately left as-is since queries match on it.)
+func TestTransaction(t *testing.T) {
+       tx := DB.Begin()
+       u := User{Name: "transcation"}
+       if err := tx.Save(&u).Error; err != nil {
+               t.Errorf("No error should raise")
+       }
+
+       // Visible inside the same transaction before commit.
+       if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
+               t.Errorf("Should find saved record")
+       }
+
+       if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
+               t.Errorf("Should return the underlying sql.Tx")
+       }
+
+       tx.Rollback()
+
+       // After rollback the record must be gone.
+       if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil {
+               t.Errorf("Should not find record after rollback")
+       }
+
+       tx2 := DB.Begin()
+       u2 := User{Name: "transcation-2"}
+       if err := tx2.Save(&u2).Error; err != nil {
+               t.Errorf("No error should raise")
+       }
+
+       if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
+               t.Errorf("Should find saved record")
+       }
+
+       tx2.Commit()
+
+       // After commit the record is visible outside the transaction.
+       if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
+               t.Errorf("Should be able to find committed record")
+       }
+}
+
+// TestRow checks single-row scanning via Row().Scan.
+func TestRow(t *testing.T) {
+       user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       var age int64
+       DB.Table("users").Where("name = ?", user2.Name).Select("age").Row().Scan(&age)
+       if age != 10 {
+               t.Errorf("Scan with Row")
+       }
+}
+
+// TestRows checks multi-row iteration via Rows(); exactly two of the three
+// saved users match the filter.
+func TestRows(t *testing.T) {
+       user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
+       if err != nil {
+               // Fatal: iterating a nil *sql.Rows below would panic.
+               t.Fatalf("Not error should happen, got %v", err)
+       }
+       defer rows.Close()
+
+       count := 0
+       for rows.Next() {
+               var name string
+               var age int64
+               rows.Scan(&name, &age)
+               count++
+       }
+       if err := rows.Err(); err != nil {
+               t.Errorf("rows iteration should not fail, got %v", err)
+       }
+
+       if count != 2 {
+               t.Errorf("Should found two records")
+       }
+}
+
+// TestScanRows checks DB.ScanRows scanning each *sql.Rows row into a struct.
+func TestScanRows(t *testing.T) {
+       user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
+       if err != nil {
+               // Fatal: iterating a nil *sql.Rows below would panic.
+               t.Fatalf("Not error should happen, got %v", err)
+       }
+       defer rows.Close()
+
+       type Result struct {
+               Name string
+               Age  int
+       }
+
+       var results []Result
+       for rows.Next() {
+               var result Result
+               if err := DB.ScanRows(rows, &result); err != nil {
+                       t.Errorf("should get no error, but got %v", err)
+               }
+               results = append(results, result)
+       }
+
+       if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) {
+               t.Errorf("Should find expected results")
+       }
+}
+
+// TestScan checks Scan into a struct, into a pointer-to-pointer, and into a
+// slice of structs.
+func TestScan(t *testing.T) {
+       user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       type result struct {
+               Name string
+               Age  int
+       }
+
+       var res result
+       DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
+       if res.Name != user3.Name {
+               t.Errorf("Scan into struct should work")
+       }
+
+       // &doubleAgeRes is deliberately a **result: Scan must handle the
+       // extra level of indirection.
+       var doubleAgeRes = &result{}
+       if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error; err != nil {
+               t.Errorf("Scan to pointer of pointer")
+       }
+       if doubleAgeRes.Age != res.Age*2 {
+               t.Errorf("Scan double age as age")
+       }
+
+       var ress []result
+       DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
+       if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
+               t.Errorf("Scan into struct map")
+       }
+}
+
+// TestRaw checks Raw queries with Scan and Rows, and raw Exec updates.
+func TestRaw(t *testing.T) {
+       user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       type result struct {
+               Name  string
+               Email string
+       }
+
+       var ress []result
+       DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
+       if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
+               t.Errorf("Raw with scan")
+       }
+
+       // Check the error instead of discarding it: ranging over a nil
+       // *sql.Rows would panic. Close the rows so the connection is released.
+       rows, err := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
+       if err != nil {
+               t.Fatalf("Raw with Rows should not fail, got %v", err)
+       }
+       defer rows.Close()
+       count := 0
+       for rows.Next() {
+               count++
+       }
+       if count != 1 {
+               t.Errorf("Raw with Rows should find one record with name 3")
+       }
+
+       // Raw update renames all three users; the old names must be gone.
+       DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name})
+       if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.ErrRecordNotFound {
+               t.Error("Raw sql to update records")
+       }
+}
+
+// TestGroup checks that a Group query builds valid SQL and its rows can be
+// iterated.
+func TestGroup(t *testing.T) {
+       rows, err := DB.Select("name").Table("users").Group("name").Rows()
+       if err != nil {
+               t.Errorf("Should not raise any error")
+               return
+       }
+       defer rows.Close()
+
+       for rows.Next() {
+               var name string
+               rows.Scan(&name)
+       }
+}
+
+// TestJoins checks Joins with left/inner joins, join conditions with bind
+// variables, multiple chained joins, and Where structs whose field names
+// collide across joined tables.
+func TestJoins(t *testing.T) {
+       var user = User{
+               Name:       "joins",
+               CreditCard: CreditCard{Number: "411111111111"},
+               Emails:     []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}},
+       }
+       DB.Save(&user)
+
+       // Left join: one user x two emails = two result rows.
+       var users1 []User
+       DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1)
+       if len(users1) != 2 {
+               t.Errorf("should find two users using left join")
+       }
+
+       var users2 []User
+       DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Where("name = ?", "joins").First(&users2)
+       if len(users2) != 1 {
+               t.Errorf("should find one users using left join with conditions")
+       }
+
+       var users3 []User
+       DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3)
+       if len(users3) != 1 {
+               t.Errorf("should find one users using multiple left join conditions")
+       }
+
+       // Non-matching card number: the inner join filters everything out.
+       var users4 []User
+       DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4)
+       if len(users4) != 0 {
+               t.Errorf("should find no user when searching with unexisting credit card")
+       }
+
+       // Id exists on User and Email: conditions must qualify columns correctly.
+       var users5 []User
+       db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5)
+       if db5.Error != nil {
+               t.Errorf("Should not raise error for join where identical fields in different tables. Error: %s", db5.Error.Error())
+       }
+}
+
+// JoinedIds maps three identically named result columns ("id", one from each
+// joined table) onto distinct fields by select-list position.
+type JoinedIds struct {
+       UserID           int64 `gorm:"column:id"`
+       BillingAddressID int64 `gorm:"column:id"`
+       EmailID          int64 `gorm:"column:id"`
+}
+
+// TestScanIdenticalColumnNames checks that Scan assigns identically named
+// "id" columns from three joined tables to the right JoinedIds fields.
+func TestScanIdenticalColumnNames(t *testing.T) {
+       var user = User{
+               Name:  "joinsIds",
+               Email: "joinIds@example.com",
+               BillingAddress: Address{
+                       Address1: "One Park Place",
+               },
+               Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}},
+       }
+       DB.Save(&user)
+
+       var users []JoinedIds
+       DB.Select("users.id, addresses.id, emails.id").Table("users").
+               Joins("left join addresses on users.billing_address_id = addresses.id").
+               Joins("left join emails on emails.user_id = users.id").
+               Where("name = ?", "joinsIds").Scan(&users)
+
+       // One user x two emails = two rows.
+       if len(users) != 2 {
+               t.Fatal("should find two rows using left join")
+       }
+
+       // Both rows carry the same user and billing-address ids.
+       if user.Id != users[0].UserID {
+               t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[0].UserID)
+       }
+       if user.Id != users[1].UserID {
+               t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[1].UserID)
+       }
+
+       if user.BillingAddressID.Int64 != users[0].BillingAddressID {
+               t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[0].BillingAddressID)
+       }
+       // Fixed copy-paste bug: the message previously printed users[0]'s id
+       // while checking users[1].
+       if user.BillingAddressID.Int64 != users[1].BillingAddressID {
+               t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[1].BillingAddressID)
+       }
+
+       // The email id differs per row and must match one of the saved emails.
+       if users[0].EmailID == users[1].EmailID {
+               t.Errorf("Email ids should be unique. Got %d and %d", users[0].EmailID, users[1].EmailID)
+       }
+
+       if int64(user.Emails[0].Id) != users[0].EmailID && int64(user.Emails[1].Id) != users[0].EmailID {
+               t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[0].EmailID)
+       }
+
+       if int64(user.Emails[0].Id) != users[1].EmailID && int64(user.Emails[1].Id) != users[1].EmailID {
+               t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[1].EmailID)
+       }
+}
+
+// TestJoinsWithSelect checks that a Select over joined tables scans into a
+// plain result struct.
+func TestJoinsWithSelect(t *testing.T) {
+       type result struct {
+               Name  string
+               Email string
+       }
+
+       user := User{
+               Name:   "joins_with_select",
+               Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}},
+       }
+       DB.Save(&user)
+
+       var results []result
+       DB.Table("users").
+               Select("name, emails.email").
+               Joins("left join emails on emails.user_id = users.id").
+               Where("name = ?", "joins_with_select").
+               Scan(&results)
+
+       ok := len(results) == 2 && results[0].Email == "join1@example.com" && results[1].Email == "join2@example.com"
+       if !ok {
+               t.Errorf("Should find all two emails with Join select")
+       }
+}
+
+func TestHaving(t *testing.T) {
+       rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows()
+
+       if err == nil {
+               defer rows.Close()
+               for rows.Next() {
+                       var name string
+                       var total int64
+                       rows.Scan(&name, &total)
+
+                       if name == "2" && total != 1 {
+                               t.Errorf("Should have one user having name 2")
+                       }
+                       if name == "3" && total != 2 {
+                               t.Errorf("Should have two users having name 3")
+                       }
+               }
+       } else {
+               t.Errorf("Should not raise any error")
+       }
+}
+
// TestQueryBuilderSubselectInWhere checks that a gorm query can be embedded
// as a subselect via QueryExpr, both inside an IN clause and as a scalar
// comparison against an aggregate.
func TestQueryBuilderSubselectInWhere(t *testing.T) {
        user := User{Name: "query_expr_select_ruser1", Email: "root@user1.com", Age: 32}
        DB.Save(&user)
        user = User{Name: "query_expr_select_ruser2", Email: "nobody@user2.com", Age: 16}
        DB.Save(&user)
        user = User{Name: "query_expr_select_ruser3", Email: "root@user3.com", Age: 64}
        DB.Save(&user)
        user = User{Name: "query_expr_select_ruser4", Email: "somebody@user3.com", Age: 128}
        DB.Save(&user)

        // All four fixture users match the subselect's name pattern.
        var users []User
        DB.Select("*").Where("name IN (?)", DB.
                Select("name").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)

        if len(users) != 4 {
                t.Errorf("Four users should be found, instead found %d", len(users))
        }

        // Average age is (32+16+64+128)/4 = 60, so only ages 64 and 128 qualify.
        DB.Select("*").Where("name LIKE ?", "query_expr_select%").Where("age >= (?)", DB.
                Select("AVG(age)").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)

        if len(users) != 2 {
                t.Errorf("Two users should be found, instead found %d", len(users))
        }
}
+
// TestQueryBuilderRawQueryWithSubquery embeds QueryExpr subqueries inside
// raw SQL statements and counts the grouped rows each one yields.
func TestQueryBuilderRawQueryWithSubquery(t *testing.T) {
        user := User{Name: "subquery_test_user1", Age: 10}
        DB.Save(&user)
        user = User{Name: "subquery_test_user2", Age: 11}
        DB.Save(&user)
        user = User{Name: "subquery_test_user3", Age: 12}
        DB.Save(&user)

        // Positive filter: two of the three fixture users satisfy age/name.
        var count int
        err := DB.Raw("select count(*) from (?) tmp",
                DB.Table("users").
                        Select("name").
                        Where("age >= ? and name in (?)", 10, []string{"subquery_test_user1", "subquery_test_user2"}).
                        Group("name").
                        QueryExpr(),
        ).Count(&count).Error

        if err != nil {
                t.Errorf("Expected to get no errors, but got %v", err)
        }
        if count != 2 {
                t.Errorf("Row count must be 2, instead got %d", count)
        }

        // Negative (Not) filters: only subquery_test_user3 remains.
        err = DB.Raw("select count(*) from (?) tmp",
                DB.Table("users").
                        Select("name").
                        Where("name LIKE ?", "subquery_test%").
                        Not("age <= ?", 10).Not("name in (?)", []string{"subquery_test_user1", "subquery_test_user2"}).
                        Group("name").
                        QueryExpr(),
        ).Count(&count).Error

        if err != nil {
                t.Errorf("Expected to get no errors, but got %v", err)
        }
        if count != 1 {
                t.Errorf("Row count must be 1, instead got %d", count)
        }
}
+
+func TestQueryBuilderSubselectInHaving(t *testing.T) {
+       user := User{Name: "query_expr_having_ruser1", Email: "root@user1.com", Age: 64}
+       DB.Save(&user)
+       user = User{Name: "query_expr_having_ruser2", Email: "root@user2.com", Age: 128}
+       DB.Save(&user)
+       user = User{Name: "query_expr_having_ruser3", Email: "root@user1.com", Age: 64}
+       DB.Save(&user)
+       user = User{Name: "query_expr_having_ruser4", Email: "root@user2.com", Age: 128}
+       DB.Save(&user)
+
+       var users []User
+       DB.Select("AVG(age) as avgage").Where("name LIKE ?", "query_expr_having_%").Group("email").Having("AVG(age) > (?)", DB.
+               Select("AVG(age)").Where("name LIKE ?", "query_expr_having_%").Table("users").QueryExpr()).Find(&users)
+
+       if len(users) != 1 {
+               t.Errorf("Two user group should be found, instead found %d", len(users))
+       }
+}
+
// DialectHasTzSupport reports whether the dialect selected via GORM_DIALECT
// can store time zone information. FoundationDB (like mssql) cannot, so
// time-zone-sensitive tests fall back to UTC there.
func DialectHasTzSupport() bool {
        return os.Getenv("GORM_DIALECT") != "foundation"
}
+
// TestTimeWithZone saves one instant expressed in two zones (Asia/Shanghai
// and UTC) and checks the birthday survives Save and First unchanged, and
// that range queries just around it match/miss as expected.
func TestTimeWithZone(t *testing.T) {
        var format = "2006-01-02 15:04:05 -0700"
        var times []time.Time
        GMT8, _ := time.LoadLocation("Asia/Shanghai")
        // Both entries denote the same instant: 2013-02-18 17:51:49 UTC.
        times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8))
        times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC))

        for index, vtime := range times {
                name := "time_with_zone_" + strconv.Itoa(index)
                user := User{Name: name, Birthday: &vtime}

                if !DialectHasTzSupport() {
                        // If our driver dialect doesn't support TZ's, just use UTC for everything here.
                        utcBirthday := user.Birthday.UTC()
                        user.Birthday = &utcBirthday
                }

                DB.Save(&user)
                expectedBirthday := "2013-02-18 17:51:49 +0000"
                foundBirthday := user.Birthday.UTC().Format(format)
                if foundBirthday != expectedBirthday {
                        t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
                }

                var findUser, findUser2, findUser3 User
                DB.First(&findUser, "name = ?", name)
                foundBirthday = findUser.Birthday.UTC().Format(format)
                if foundBirthday != expectedBirthday {
                        t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
                }

                // One minute either side of the stored instant: the earlier
                // bound must match, the later one must not.
                if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() {
                        t.Errorf("User should be found")
                }

                if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() {
                        t.Errorf("User should not be found")
                }
        }
}
+
// TestHstore (postgres only) round-trips a string map through a
// postgres.Hstore column; skipped on every other dialect.
func TestHstore(t *testing.T) {
        type Details struct {
                Id   int64
                Bulk postgres.Hstore
        }

        if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
                t.Skip()
        }

        // Creating the extension needs superuser rights; the printed hint
        // explains how to grant them when this fails.
        if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil {
                fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m")
                panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err))
        }

        DB.Exec("drop table details")

        if err := DB.CreateTable(&Details{}).Error; err != nil {
                panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
        }

        bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
        bulk := map[string]*string{
                "bankAccountId": &bankAccountId,
                "phoneNumber":   &phoneNumber,
                "opinion":       &opinion,
        }
        d := Details{Bulk: bulk}
        DB.Save(&d)

        var d2 Details
        if err := DB.First(&d2).Error; err != nil {
                t.Errorf("Got error when tried to fetch details: %+v", err)
        }

        // Every key written must come back with an identical value.
        for k := range bulk {
                if r, ok := d2.Bulk[k]; ok {
                        if res, _ := bulk[k]; *res != *r {
                                t.Errorf("Details should be equal")
                        }
                } else {
                        t.Errorf("Details should be existed")
                }
        }
}
+
+func TestSetAndGet(t *testing.T) {
+       if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
+               t.Errorf("Should be able to get setting after set")
+       } else {
+               if value.(string) != "world" {
+                       t.Errorf("Setted value should not be changed")
+               }
+       }
+
+       if _, ok := DB.Get("non_existing"); ok {
+               t.Errorf("Get non existing key should return error")
+       }
+}
+
+func TestCompatibilityMode(t *testing.T) {
+       DB, _ := gorm.Open("testdb", "")
+       testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
+               columns := []string{"id", "name", "age"}
+               result := `
+               1,Tim,20
+               2,Joe,25
+               3,Bob,30
+               `
+               return testdb.RowsFromCSVString(columns, result), nil
+       })
+
+       var users []User
+       DB.Find(&users)
+       if (users[0].Name != "Tim") || len(users) != 3 {
+               t.Errorf("Unexcepted result returned")
+       }
+}
+
// TestOpenExistingDB verifies gorm.Open can wrap an already-open *sql.DB
// handle instead of dialing a new connection.
func TestOpenExistingDB(t *testing.T) {
        DB.Save(&User{Name: "jnfeinstein"})
        dialect := os.Getenv("GORM_DIALECT")

        db, err := gorm.Open(dialect, DB.DB())
        if err != nil {
                t.Errorf("Should have wrapped the existing DB connection")
        }

        var user User
        // The wrapped handle shares the underlying connection with DB, so
        // the row saved above must be visible through it.
        if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.ErrRecordNotFound {
                t.Errorf("Should have found existing record")
        }
}
+
// TestDdlErrors checks that operations on a closed connection surface an
// error. The shared DB handle is closed up front and re-opened in a defer
// so that subsequent tests keep a working connection.
func TestDdlErrors(t *testing.T) {
        var err error

        if err = DB.Close(); err != nil {
                t.Errorf("Closing DDL test db connection err=%s", err)
        }
        defer func() {
                // Reopen DB connection.
                if DB, err = OpenTestConnection(); err != nil {
                        t.Fatalf("Failed re-opening db connection: %s", err)
                }
        }()

        if err := DB.Find(&User{}).Error; err == nil {
                t.Errorf("Expected operation on closed db to produce an error, but err was nil")
        }
}
+
+func TestOpenWithOneParameter(t *testing.T) {
+       db, err := gorm.Open("dialect")
+       if db != nil {
+               t.Error("Open with one parameter returned non nil for db")
+       }
+       if err == nil {
+               t.Error("Open with one parameter returned err as nil")
+       }
+}
+
// TestSaveAssociations verifies the save_associations tag on Place:
// PlaceAddress (save_associations:false) must never be created or updated
// through a Place save, while OwnerAddress (save_associations:true) is
// persisted automatically. Progress is tracked via address-row counts.
func TestSaveAssociations(t *testing.T) {
        db := DB.New()
        // Baseline address count so the assertions below are deltas and do
        // not depend on rows left behind by earlier tests.
        deltaAddressCount := 0
        if err := db.Model(&Address{}).Count(&deltaAddressCount).Error; err != nil {
                t.Errorf("failed to fetch address count")
                t.FailNow()
        }

        placeAddress := &Address{
                Address1: "somewhere on earth",
        }
        ownerAddress1 := &Address{
                Address1: "near place address",
        }
        ownerAddress2 := &Address{
                Address1: "address2",
        }
        db.Create(placeAddress)

        // Helper asserting the current number of address rows.
        addressCountShouldBe := func(t *testing.T, expectedCount int) {
                countFromDB := 0
                t.Helper()
                err := db.Model(&Address{}).Count(&countFromDB).Error
                if err != nil {
                        t.Error("failed to fetch address count")
                }
                if countFromDB != expectedCount {
                        t.Errorf("address count mismatch: %d", countFromDB)
                }
        }
        addressCountShouldBe(t, deltaAddressCount+1)

        // owner address should be created, place address should be reused
        place1 := &Place{
                PlaceAddressID: placeAddress.ID,
                PlaceAddress:   placeAddress,
                OwnerAddress:   ownerAddress1,
        }
        err := db.Create(place1).Error
        if err != nil {
                t.Errorf("failed to store place: %s", err.Error())
        }
        addressCountShouldBe(t, deltaAddressCount+2)

        // owner address should be created again, place address should be reused
        // NOTE(review): the bogus IDs (777/778) appear intended to be ignored
        // or overwritten during save — confirm against gorm's
        // save_associations handling.
        place2 := &Place{
                PlaceAddressID: placeAddress.ID,
                PlaceAddress: &Address{
                        ID:       777,
                        Address1: "address1",
                },
                OwnerAddress:   ownerAddress2,
                OwnerAddressID: 778,
        }
        err = db.Create(place2).Error
        if err != nil {
                t.Errorf("failed to store place: %s", err.Error())
        }
        addressCountShouldBe(t, deltaAddressCount+3)

        count := 0
        db.Model(&Place{}).Where(&Place{
                PlaceAddressID: placeAddress.ID,
                OwnerAddressID: ownerAddress1.ID,
        }).Count(&count)
        if count != 1 {
                t.Errorf("only one instance of (%d, %d) should be available, found: %d",
                        placeAddress.ID, ownerAddress1.ID, count)
        }

        db.Model(&Place{}).Where(&Place{
                PlaceAddressID: placeAddress.ID,
                OwnerAddressID: ownerAddress2.ID,
        }).Count(&count)
        if count != 1 {
                t.Errorf("only one instance of (%d, %d) should be available, found: %d",
                        placeAddress.ID, ownerAddress2.ID, count)
        }

        db.Model(&Place{}).Where(&Place{
                PlaceAddressID: placeAddress.ID,
        }).Count(&count)
        if count != 2 {
                t.Errorf("two instances of (%d) should be available, found: %d",
                        placeAddress.ID, count)
        }
}
+
+func TestBlockGlobalUpdate(t *testing.T) {
+       db := DB.New()
+       db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
+
+       err := db.Model(&Toy{}).Update("OwnerType", "Human").Error
+       if err != nil {
+               t.Error("Unexpected error on global update")
+       }
+
+       err = db.Delete(&Toy{}).Error
+       if err != nil {
+               t.Error("Unexpected error on global delete")
+       }
+
+       db.BlockGlobalUpdate(true)
+
+       db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
+
+       err = db.Model(&Toy{}).Update("OwnerType", "Human").Error
+       if err == nil {
+               t.Error("Expected error on global update")
+       }
+
+       err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error
+       if err != nil {
+               t.Error("Unxpected error on conditional update")
+       }
+
+       err = db.Delete(&Toy{}).Error
+       if err == nil {
+               t.Error("Expected error on global delete")
+       }
+       err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error
+       if err != nil {
+               t.Error("Unexpected error on conditional delete")
+       }
+}
+
// BenchmarkGorm measures a full insert/query/update/delete cycle through
// gorm for each iteration.
// NOTE(review): overwriting b.N defeats the testing package's adaptive
// iteration count; the framework normally chooses b.N itself.
func BenchmarkGorm(b *testing.B) {
        b.N = 2000
        for x := 0; x < b.N; x++ {
                e := strconv.Itoa(x) + "benchmark@example.org"
                now := time.Now()
                email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
                // Insert
                DB.Save(&email)
                // Query
                DB.First(&EmailWithIdx{}, "email = ?", e)
                // Update
                DB.Model(&email).UpdateColumn("email", "new-"+e)
                // Delete
                DB.Delete(&email)
        }
}
+
+func BenchmarkRawSql(b *testing.B) {
+       DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable")
+       DB.SetMaxIdleConns(10)
+       insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
+       querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
+       updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
+       deleteSql := "DELETE FROM orders WHERE id = $1"
+
+       b.N = 2000
+       for x := 0; x < b.N; x++ {
+               var id int64
+               e := strconv.Itoa(x) + "benchmark@example.org"
+               now := time.Now()
+               email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
+               // Insert
+               DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
+               // Query
+               rows, _ := DB.Query(querySql, email.Email)
+               rows.Close()
+               // Update
+               DB.Exec(updateSql, "new-"+e, time.Now(), id)
+               // Delete
+               DB.Exec(deleteSql, id)
+       }
+}
+
// parseTime parses str relative to the current UTC time using the
// jinzhu/now helper and returns a pointer to the result. MustParse panics
// on malformed input, so this is for test fixtures only.
func parseTime(str string) *time.Time {
        t := now.New(time.Now().UTC()).MustParse(str)
        return &t
}
diff --git a/vendor/github.com/jinzhu/gorm/migration_test.go b/vendor/github.com/jinzhu/gorm/migration_test.go
new file mode 100755 (executable)
index 0000000..3fb0664
--- /dev/null
@@ -0,0 +1,540 @@
+package gorm_test
+
+import (
+       "database/sql"
+       "database/sql/driver"
+       "errors"
+       "fmt"
+       "os"
+       "reflect"
+       "strconv"
+       "testing"
+       "time"
+
+       "github.com/jinzhu/gorm"
+)
+
// User is the central fixture model, covering most association kinds:
// has-many (Emails), struct associations with explicit foreign keys
// (Billing/Shipping Address), has-one (CreditCard), many2many (Languages),
// belongs-to (Company), custom Scanner/Valuer fields (Role, Password) and
// assorted `sql:"-"` ignored fields.
type User struct {
        Id                int64
        Age               int64
        UserNum           Num
        Name              string `sql:"size:255"`
        Email             string
        Birthday          *time.Time    // Time
        CreatedAt         time.Time     // CreatedAt: Time of record is created, will be insert automatically
        UpdatedAt         time.Time     // UpdatedAt: Time of record is updated, will be updated automatically
        Emails            []Email       // Embedded structs
        BillingAddress    Address       // Embedded struct
        BillingAddressID  sql.NullInt64 // Embedded struct's foreign key
        ShippingAddress   Address       // Embedded struct
        ShippingAddressId int64         // Embedded struct's foreign key
        CreditCard        CreditCard
        Latitude          float64
        Languages         []Language `gorm:"many2many:user_languages;"`
        CompanyID         *int
        Company           Company
        Role              Role
        Password          EncryptedData
        PasswordHash      []byte
        IgnoreMe          int64                 `sql:"-"`
        IgnoreStringSlice []string              `sql:"-"`
        Ignored           struct{ Name string } `sql:"-"`
        IgnoredPointer    *User                 `sql:"-"`
}
+
// NotSoLongTableName references a model whose generated table/foreign-key
// identifiers approach MySQL's 64-character name-length limit.
type NotSoLongTableName struct {
        Id                int64
        ReallyLongThingID int64
        ReallyLongThing   ReallyLongTableNameToTestMySQLNameLengthLimit
}

// ReallyLongTableNameToTestMySQLNameLengthLimit exists purely for its long
// generated table and index names.
type ReallyLongTableNameToTestMySQLNameLengthLimit struct {
        Id int64
}

// ReallyLongThingThatReferencesShort pairs a long model name with a short
// association target.
type ReallyLongThingThatReferencesShort struct {
        Id      int64
        ShortID int64
        Short   Short
}

// Short is a minimal association target for the name-length tests.
type Short struct {
        Id int64
}
+
// CreditCard is a has-one fixture owned by User via UserId; DeletedAt maps
// to a custom column to exercise soft delete with a non-default name.
type CreditCard struct {
        ID        int8
        Number    string
        UserId    sql.NullInt64
        CreatedAt time.Time `sql:"not null"`
        UpdatedAt time.Time
        DeletedAt *time.Time `sql:"column:deleted_time"`
}

// Email is the has-many fixture owned by User via UserId.
type Email struct {
        Id        int16
        UserId    int
        Email     string `sql:"type:varchar(100);"`
        CreatedAt time.Time
        UpdatedAt time.Time
}

// Address is reused as the billing/shipping/place/owner association target;
// DeletedAt enables soft delete.
type Address struct {
        ID        int
        Address1  string
        Address2  string
        Post      string
        CreatedAt time.Time
        UpdatedAt time.Time
        DeletedAt *time.Time
}
+
// Language is the many2many fixture, joined to User through the
// user_languages table.
type Language struct {
        gorm.Model
        Name  string
        Users []User `gorm:"many2many:user_languages;"`
}

// Product records how many times each lifecycle callback fired so callback
// tests can assert on the counters.
type Product struct {
        Id                    int64
        Code                  string
        Price                 int64
        CreatedAt             time.Time
        UpdatedAt             time.Time
        AfterFindCallTimes    int64
        BeforeCreateCallTimes int64
        AfterCreateCallTimes  int64
        BeforeUpdateCallTimes int64
        AfterUpdateCallTimes  int64
        BeforeSaveCallTimes   int64
        AfterSaveCallTimes    int64
        BeforeDeleteCallTimes int64
        AfterDeleteCallTimes  int64
}

// Company is a belongs-to fixture; Owner is excluded from the schema.
type Company struct {
        Id    int64
        Name  string
        Owner *User `sql:"-"`
}

// Place carries one association with save_associations disabled and one
// enabled, so TestSaveAssociations can compare the two behaviors.
type Place struct {
        Id             int64
        PlaceAddressID int
        PlaceAddress   *Address `gorm:"save_associations:false"`
        OwnerAddressID int
        OwnerAddress   *Address `gorm:"save_associations:true"`
}
+
// EncryptedData simulates encrypted storage: Value prepends a "***" marker
// before writing and Scan verifies and strips it when reading.
type EncryptedData []byte

// Scan implements sql.Scanner. Only []byte values carrying the "***"
// prefix are accepted; the remainder is stored.
func (data *EncryptedData) Scan(value interface{}) error {
        b, ok := value.([]byte)
        if !ok {
                return errors.New("Bytes expected")
        }

        if len(b) < 3 || string(b[:3]) != "***" {
                return errors.New("Too short")
        }

        *data = b[3:]
        return nil
}

// Value implements driver.Valuer, adding the "***" prefix. Data starting
// with 'x' is rejected so tests can trigger a save failure on demand.
func (data EncryptedData) Value() (driver.Value, error) {
        if len(data) > 0 && data[0] == 'x' {
                //needed to test failures
                return nil, errors.New("Should not start with 'x'")
        }

        return append([]byte("***"), data...), nil
}
+
// Role is a string-backed custom type with Scanner/Valuer support, used to
// test scanning of custom column types.
type Role struct {
        Name string `gorm:"size:256"`
}

// Scan implements sql.Scanner, accepting []uint8 or string column values.
// Fixed: the previous bare value.(string) assertion panicked on any other
// type; an error is now returned instead, matching Num.Scan's behavior.
func (role *Role) Scan(value interface{}) error {
        switch v := value.(type) {
        case []uint8:
                role.Name = string(v)
        case string:
                role.Name = v
        default:
                return errors.New("Cannot scan Role from " + reflect.ValueOf(value).String())
        }
        return nil
}

// Value implements driver.Valuer, storing the role as its plain name.
func (role Role) Value() (driver.Value, error) {
        return role.Name, nil
}

// IsAdmin reports whether the role is the admin role.
func (role Role) IsAdmin() bool {
        return role.Name == "admin"
}
+
// Num is an int64 wrapper with a custom sql.Scanner.
type Num int64

// Scan implements sql.Scanner, accepting []byte (decimal text) or int64.
// Fixed: the parse error from byte input was silently discarded, storing 0
// for malformed values; it is now returned to the caller.
func (i *Num) Scan(src interface{}) error {
        switch s := src.(type) {
        case []byte:
                n, err := strconv.ParseInt(string(s), 10, 64)
                if err != nil {
                        return err
                }
                *i = Num(n)
        case int64:
                *i = Num(s)
        default:
                return errors.New("Cannot scan NamedInt from " + reflect.ValueOf(src).String())
        }
        return nil
}
+
// Animal exercises a custom primary key, SQL column defaults, a reserved
// SQL keyword as a field name (From) and an unexported (ignored) field.
type Animal struct {
        Counter    uint64    `gorm:"primary_key:yes"`
        Name       string    `sql:"DEFAULT:'galeone'"`
        From       string    //test reserved sql keyword as field name
        Age        time.Time `sql:"DEFAULT:current_timestamp"`
        unexported string    // unexported value
        CreatedAt  time.Time
        UpdatedAt  time.Time
}

// JoinTable is a hand-defined join table carrying an extra Time column.
type JoinTable struct {
        From uint64
        To   uint64
        Time time.Time `sql:"default: null"`
}

// Post has two belongs-to relations to Category (nullable CategoryId and
// required MainCategoryId) plus a has-many of Comments.
type Post struct {
        Id             int64
        CategoryId     sql.NullInt64
        MainCategoryId int64
        Title          string
        Body           string
        Comments       []*Comment
        Category       Category
        MainCategory   Category
}

// Category is self-referential: a category may own child Categories.
type Category struct {
        gorm.Model
        Name string

        Categories []Category
        CategoryID *uint
}

// Comment belongs to a Post.
type Comment struct {
        gorm.Model
        PostId  int64
        Content string
        Post    Post
}
+
// NullValue aggregates the database/sql Null* scanner wrappers (plus the
// local NullTime) to test scanning of NULL-able columns.
type NullValue struct {
        Id      int64
        Name    sql.NullString  `sql:"not null"`
        Gender  *sql.NullString `sql:"not null"`
        Age     sql.NullInt64
        Male    sql.NullBool
        Height  sql.NullFloat64
        AddedAt NullTime
}
+
// NullTime mirrors sql.NullTime: a time.Time plus a Valid flag so NULL
// columns round-trip without needing a pointer.
type NullTime struct {
        Time  time.Time
        Valid bool
}

// Scan implements sql.Scanner; a nil source marks the value invalid and
// leaves Time untouched. Non-time, non-nil sources panic (type assertion),
// matching the strictness of database drivers handing over time.Time.
func (nt *NullTime) Scan(value interface{}) error {
        if value == nil {
                nt.Valid = false
                return nil
        }
        nt.Time = value.(time.Time)
        nt.Valid = true
        return nil
}

// Value implements driver.Valuer, emitting SQL NULL when invalid.
func (nt NullTime) Value() (driver.Value, error) {
        if nt.Valid {
                return nt.Time, nil
        }
        return nil, nil
}
+
// getPreparedUser builds (without saving) a User populated with every
// association kind, reusing or creating a Company named after the role.
func getPreparedUser(name string, role string) *User {
        var company Company
        DB.Where(Company{Name: role}).FirstOrCreate(&company)

        return &User{
                Name:            name,
                Age:             20,
                Role:            Role{role},
                BillingAddress:  Address{Address1: fmt.Sprintf("Billing Address %v", name)},
                ShippingAddress: Address{Address1: fmt.Sprintf("Shipping Address %v", name)},
                CreditCard:      CreditCard{Number: fmt.Sprintf("123456%v", name)},
                Emails: []Email{
                        {Email: fmt.Sprintf("user_%v@example1.com", name)}, {Email: fmt.Sprintf("user_%v@example2.com", name)},
                },
                Company: company,
                Languages: []Language{
                        {Name: fmt.Sprintf("lang_1_%v", name)},
                        {Name: fmt.Sprintf("lang_2_%v", name)},
                },
        }
}
+
// runMigration resets the test schema: drops every fixture table, then
// recreates them all with AutoMigrate. It panics on migration failure
// since no test can run without the schema.
func runMigration() {
        if err := DB.DropTableIfExists(&User{}).Error; err != nil {
                fmt.Printf("Got error when try to delete table users, %+v\n", err)
        }

        // Tables dropped by raw SQL; errors are ignored on purpose since the
        // tables may not exist yet.
        for _, table := range []string{"animals", "user_languages"} {
                DB.Exec(fmt.Sprintf("drop table %v;", table))
        }

        values := []interface{}{&Short{}, &ReallyLongThingThatReferencesShort{}, &ReallyLongTableNameToTestMySQLNameLengthLimit{}, &NotSoLongTableName{}, &Product{}, &Email{}, &Address{}, &CreditCard{}, &Company{}, &Role{}, &Language{}, &HNPost{}, &EngadgetPost{}, &Animal{}, &User{}, &JoinTable{}, &Post{}, &Category{}, &Comment{}, &Cat{}, &Dog{}, &Hamster{}, &Toy{}, &ElementWithIgnoredField{}, &Place{}}
        for _, value := range values {
                DB.DropTable(value)
        }
        if err := DB.AutoMigrate(values...).Error; err != nil {
                panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
        }
}
+
+func TestIndexes(t *testing.T) {
+       if err := DB.Model(&Email{}).AddIndex("idx_email_email", "email").Error; err != nil {
+               t.Errorf("Got error when tried to create index: %+v", err)
+       }
+
+       scope := DB.NewScope(&Email{})
+       if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") {
+               t.Errorf("Email should have index idx_email_email")
+       }
+
+       if err := DB.Model(&Email{}).RemoveIndex("idx_email_email").Error; err != nil {
+               t.Errorf("Got error when tried to remove index: %+v", err)
+       }
+
+       if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") {
+               t.Errorf("Email's index idx_email_email should be deleted")
+       }
+
+       if err := DB.Model(&Email{}).AddIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil {
+               t.Errorf("Got error when tried to create index: %+v", err)
+       }
+
+       if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+               t.Errorf("Email should have index idx_email_email_and_user_id")
+       }
+
+       if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil {
+               t.Errorf("Got error when tried to remove index: %+v", err)
+       }
+
+       if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+               t.Errorf("Email's index idx_email_email_and_user_id should be deleted")
+       }
+
+       if err := DB.Model(&Email{}).AddUniqueIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil {
+               t.Errorf("Got error when tried to create index: %+v", err)
+       }
+
+       if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+               t.Errorf("Email should have index idx_email_email_and_user_id")
+       }
+
+       if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.comiii"}, {Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error == nil {
+               t.Errorf("Should get to create duplicate record when having unique index")
+       }
+
+       var user = User{Name: "sample_user"}
+       DB.Save(&user)
+       if DB.Model(&user).Association("Emails").Append(Email{Email: "not-1duplicated@gmail.com"}, Email{Email: "not-duplicated2@gmail.com"}).Error != nil {
+               t.Errorf("Should get no error when append two emails for user")
+       }
+
+       if DB.Model(&user).Association("Emails").Append(Email{Email: "duplicated@gmail.com"}, Email{Email: "duplicated@gmail.com"}).Error == nil {
+               t.Errorf("Should get no duplicated email error when insert duplicated emails for a user")
+       }
+
+       if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil {
+               t.Errorf("Got error when tried to remove index: %+v", err)
+       }
+
+       if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+               t.Errorf("Email's index idx_email_email_and_user_id should be deleted")
+       }
+
+       if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error != nil {
+               t.Errorf("Should be able to create duplicated emails after remove unique index")
+       }
+}
+
// EmailWithIdx is the migration fixture for index creation: a composite
// index spanning Email+UserAgent (idx_email_agent) and an auto-named
// unique index on RegisteredAt.
type EmailWithIdx struct {
        Id           int64
        UserId       int64
        Email        string     `sql:"index:idx_email_agent"`
        UserAgent    string     `sql:"index:idx_email_agent"`
        RegisteredAt *time.Time `sql:"unique_index"`
        CreatedAt    time.Time
        UpdatedAt    time.Time
}
+
// TestAutoMigration drops and re-creates the EmailWithIdx table via
// AutoMigrate, then checks both declared indexes exist and that a saved
// row reads back intact.
func TestAutoMigration(t *testing.T) {
        DB.AutoMigrate(&Address{})
        DB.DropTable(&EmailWithIdx{})
        if err := DB.AutoMigrate(&EmailWithIdx{}).Error; err != nil {
                t.Errorf("Auto Migrate should not raise any error")
        }

        now := time.Now()
        DB.Save(&EmailWithIdx{Email: "jinzhu@example.org", UserAgent: "pc", RegisteredAt: &now})

        scope := DB.NewScope(&EmailWithIdx{})
        // Composite index declared on Email+UserAgent.
        if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_agent") {
                t.Errorf("Failed to create index")
        }

        // Auto-named unique index derived from the RegisteredAt column.
        if !scope.Dialect().HasIndex(scope.TableName(), "uix_email_with_idxes_registered_at") {
                t.Errorf("Failed to create index")
        }

        var bigemail EmailWithIdx
        DB.First(&bigemail, "user_agent = ?", "pc")
        if bigemail.Email != "jinzhu@example.org" || bigemail.UserAgent != "pc" || bigemail.RegisteredAt.IsZero() {
                t.Error("Big Emails should be saved and fetched correctly")
        }
}
+
// TestCreateAndAutomigrateTransaction checks that CreateTable/AutoMigrate
// issued inside a transaction are visible to that transaction (and that a
// table dropped outside it is invisible to both), then rolls everything
// back.
func TestCreateAndAutomigrateTransaction(t *testing.T) {
        tx := DB.Begin()

        // A shadowed Bar type is redeclared per closure to simulate a schema
        // evolving across migration steps.
        func() {
                type Bar struct {
                        ID uint
                }
                DB.DropTableIfExists(&Bar{})

                if ok := DB.HasTable("bars"); ok {
                        t.Errorf("Table should not exist, but does")
                }

                if ok := tx.HasTable("bars"); ok {
                        t.Errorf("Table should not exist, but does")
                }
        }()

        func() {
                type Bar struct {
                        Name string
                }
                err := tx.CreateTable(&Bar{}).Error

                if err != nil {
                        t.Errorf("Should have been able to create the table, but couldn't: %s", err)
                }

                if ok := tx.HasTable(&Bar{}); !ok {
                        t.Errorf("The transaction should be able to see the table")
                }
        }()

        func() {
                type Bar struct {
                        Stuff string
                }

                err := tx.AutoMigrate(&Bar{}).Error
                if err != nil {
                        t.Errorf("Should have been able to alter the table, but couldn't")
                }
        }()

        tx.Rollback()
}
+
// MultipleIndexes exercises composite and named index declarations: UserID
// participates in two composite unique indexes and one composite plain index,
// while Email and Other additionally get an auto-named single-column index
// (the empty name before the comma in their tags — see the
// "uix_multiple_indexes_email"/"idx_multiple_indexes_other" checks in
// TestMultipleIndexes).
type MultipleIndexes struct {
	ID     int64
	UserID int64  `sql:"unique_index:uix_multipleindexes_user_name,uix_multipleindexes_user_email;index:idx_multipleindexes_user_other"`
	Name   string `sql:"unique_index:uix_multipleindexes_user_name"`
	Email  string `sql:"unique_index:,uix_multipleindexes_user_email"`
	Other  string `sql:"index:,idx_multipleindexes_user_other"`
}
+
+func TestMultipleIndexes(t *testing.T) {
+       if err := DB.DropTableIfExists(&MultipleIndexes{}).Error; err != nil {
+               fmt.Printf("Got error when try to delete table multiple_indexes, %+v\n", err)
+       }
+
+       DB.AutoMigrate(&MultipleIndexes{})
+       if err := DB.AutoMigrate(&EmailWithIdx{}).Error; err != nil {
+               t.Errorf("Auto Migrate should not raise any error")
+       }
+
+       DB.Save(&MultipleIndexes{UserID: 1, Name: "jinzhu", Email: "jinzhu@example.org", Other: "foo"})
+
+       scope := DB.NewScope(&MultipleIndexes{})
+       if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_name") {
+               t.Errorf("Failed to create index")
+       }
+
+       if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_email") {
+               t.Errorf("Failed to create index")
+       }
+
+       if !scope.Dialect().HasIndex(scope.TableName(), "uix_multiple_indexes_email") {
+               t.Errorf("Failed to create index")
+       }
+
+       if !scope.Dialect().HasIndex(scope.TableName(), "idx_multipleindexes_user_other") {
+               t.Errorf("Failed to create index")
+       }
+
+       if !scope.Dialect().HasIndex(scope.TableName(), "idx_multiple_indexes_other") {
+               t.Errorf("Failed to create index")
+       }
+
+       var mutipleIndexes MultipleIndexes
+       DB.First(&mutipleIndexes, "name = ?", "jinzhu")
+       if mutipleIndexes.Email != "jinzhu@example.org" || mutipleIndexes.Name != "jinzhu" {
+               t.Error("MutipleIndexes should be saved and fetched correctly")
+       }
+
+       // Check unique constraints
+       if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "jinzhu@example.org", Other: "foo"}).Error; err == nil {
+               t.Error("MultipleIndexes unique index failed")
+       }
+
+       if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "foo@example.org", Other: "foo"}).Error; err != nil {
+               t.Error("MultipleIndexes unique index failed")
+       }
+
+       if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "jinzhu@example.org", Other: "foo"}).Error; err == nil {
+               t.Error("MultipleIndexes unique index failed")
+       }
+
+       if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "foo2@example.org", Other: "foo"}).Error; err != nil {
+               t.Error("MultipleIndexes unique index failed")
+       }
+}
+
+func TestModifyColumnType(t *testing.T) {
+       if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" && dialect != "mysql" && dialect != "mssql" {
+               t.Skip("Skipping this because only postgres, mysql and mssql support altering a column type")
+       }
+
+       type ModifyColumnType struct {
+               gorm.Model
+               Name1 string `gorm:"length:100"`
+               Name2 string `gorm:"length:200"`
+       }
+       DB.DropTable(&ModifyColumnType{})
+       DB.CreateTable(&ModifyColumnType{})
+
+       name2Field, _ := DB.NewScope(&ModifyColumnType{}).FieldByName("Name2")
+       name2Type := DB.Dialect().DataTypeOf(name2Field.StructField)
+
+       if err := DB.Model(&ModifyColumnType{}).ModifyColumn("name1", name2Type).Error; err != nil {
+               t.Errorf("No error should happen when ModifyColumn, but got %v", err)
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/model.go b/vendor/github.com/jinzhu/gorm/model.go
new file mode 100755 (executable)
index 0000000..f37ff7e
--- /dev/null
@@ -0,0 +1,14 @@
+package gorm
+
+import "time"
+
// Model base model definition, including fields `ID`, `CreatedAt`, `UpdatedAt`, `DeletedAt`, which could be embedded in your models
//    type User struct {
//      gorm.Model
//    }
type Model struct {
	ID        uint `gorm:"primary_key"` // surrogate primary key
	CreatedAt time.Time                 // creation time (populated by gorm, by convention)
	UpdatedAt time.Time                 // last-update time (populated by gorm, by convention)
	DeletedAt *time.Time `sql:"index"`  // soft-delete marker, indexed; nil presumably means the row is live
}
diff --git a/vendor/github.com/jinzhu/gorm/model_struct.go b/vendor/github.com/jinzhu/gorm/model_struct.go
new file mode 100755 (executable)
index 0000000..8c27e20
--- /dev/null
@@ -0,0 +1,640 @@
+package gorm
+
+import (
+       "database/sql"
+       "errors"
+       "go/ast"
+       "reflect"
+       "strings"
+       "sync"
+       "time"
+
+       "github.com/jinzhu/inflection"
+)
+
// DefaultTableNameHandler default table name handler. ModelStruct.TableName
// passes every resolved name through it, so applications may replace it to
// rewrite table names globally (e.g. add a prefix); the default returns the
// name unchanged.
var DefaultTableNameHandler = func(db *DB, defaultTableName string) string {
	return defaultTableName
}
+
// modelStructsMap caches *ModelStruct values keyed by reflect.Type so each
// model type is parsed only once; sync.Map makes the cache safe for
// concurrent use.
var modelStructsMap sync.Map
+
+// ModelStruct model definition
+type ModelStruct struct {
+       PrimaryFields    []*StructField
+       StructFields     []*StructField
+       ModelType        reflect.Type
+
+       defaultTableName string
+       l sync.Mutex
+}
+
// TableName returns model's table name, resolving and caching the default
// name on first use: a model implementing the tabler interface names itself;
// otherwise the struct name is snake-cased and (unless singularTable is set)
// pluralized. The result always passes through DefaultTableNameHandler.
func (s *ModelStruct) TableName(db *DB) string {
	s.l.Lock()
	defer s.l.Unlock()

	if s.defaultTableName == "" && db != nil && s.ModelType != nil {
		// Set default table name
		if tabler, ok := reflect.New(s.ModelType).Interface().(tabler); ok {
			// The model implements tabler and chooses its own table name.
			s.defaultTableName = tabler.TableName()
		} else {
			tableName := ToTableName(s.ModelType.Name())
			// NOTE(review): db == nil is unreachable here — the enclosing
			// condition already guarantees db != nil.
			if db == nil || !db.parent.singularTable {
				tableName = inflection.Plural(tableName)
			}
			s.defaultTableName = tableName
		}
	}

	// The handler gets the final say, even when the cached name is reused.
	return DefaultTableNameHandler(db, s.defaultTableName)
}
+
// StructField model field's struct definition
type StructField struct {
	DBName          string            // column name in the database
	Name            string            // Go field name
	Names           []string          // field path (outermost first) for fields reached through embedded structs
	IsPrimaryKey    bool              // tagged PRIMARY_KEY (or inherited from an embedded primary key)
	IsNormal        bool              // maps to a plain column (scanner, time, or basic value)
	IsIgnored       bool              // tagged "-": skipped entirely
	IsScanner       bool              // field type implements sql.Scanner
	HasDefaultValue bool              // tagged DEFAULT, or AUTO_INCREMENT on a non-primary field
	Tag             reflect.StructTag // raw struct tag
	TagSettings     map[string]string // parsed tag key/value pairs; guarded by tagSettingsLock
	Struct          reflect.StructField
	IsForeignKey    bool // field serves as a foreign key in some relationship
	Relationship    *Relationship // non-nil when the field describes an association

	tagSettingsLock sync.RWMutex // protects TagSettings (use the TagSettings* accessors)
}
+
// TagSettingsSet Sets a tag in the tag settings map, holding the write lock
// so concurrent readers/writers of TagSettings don't race.
func (s *StructField) TagSettingsSet(key, val string) {
	s.tagSettingsLock.Lock()
	defer s.tagSettingsLock.Unlock()
	s.TagSettings[key] = val
}
+
+// TagSettingsGet returns a tag from the tag settings
+func (s *StructField) TagSettingsGet(key string) (string, bool) {
+       s.tagSettingsLock.RLock()
+       defer s.tagSettingsLock.RUnlock()
+       val, ok := s.TagSettings[key]
+       return val, ok
+}
+
// TagSettingsDelete deletes a tag from the settings map under the write lock;
// deleting a missing key is a no-op.
func (s *StructField) TagSettingsDelete(key string) {
	s.tagSettingsLock.Lock()
	defer s.tagSettingsLock.Unlock()
	delete(s.TagSettings, key)
}
+
// clone returns a copy of the field suitable for reuse under a different
// owner (e.g. embedded-struct expansion): the tag-settings map and any
// Relationship are duplicated, while Names, Tag and Struct share backing
// storage with the receiver.
func (structField *StructField) clone() *StructField {
	clone := &StructField{
		DBName:          structField.DBName,
		Name:            structField.Name,
		Names:           structField.Names,
		IsPrimaryKey:    structField.IsPrimaryKey,
		IsNormal:        structField.IsNormal,
		IsIgnored:       structField.IsIgnored,
		IsScanner:       structField.IsScanner,
		HasDefaultValue: structField.HasDefaultValue,
		Tag:             structField.Tag,
		TagSettings:     map[string]string{},
		Struct:          structField.Struct,
		IsForeignKey:    structField.IsForeignKey,
	}

	if structField.Relationship != nil {
		// Copy the Relationship by value so the clone can mutate it freely.
		relationship := *structField.Relationship
		clone.Relationship = &relationship
	}

	// Copy the tag settings while holding the lock so a concurrent
	// TagSettingsSet/TagSettingsDelete can't race the map iteration.
	// NOTE(review): a full write Lock is taken although this method only
	// reads the map — RLock would suffice; confirm before changing.
	structField.tagSettingsLock.Lock()
	defer structField.tagSettingsLock.Unlock()
	for key, value := range structField.TagSettings {
		clone.TagSettings[key] = value
	}

	return clone
}
+
// Relationship described the relationship between models
type Relationship struct {
	Kind                         string // relationship kind, e.g. "has_many" or "many_to_many" (set in GetModelStruct)
	PolymorphicType              string // Go field name of the "<owner>Type" discriminator, when polymorphic
	PolymorphicDBName            string // DB column name of the polymorphic discriminator
	PolymorphicValue             string // value stored in the discriminator column for this owner
	ForeignFieldNames            []string // source-side foreign key fields (DB names)
	ForeignDBNames               []string // foreign key column names (join-table columns for many2many)
	AssociationForeignFieldNames []string // destination-side key fields (DB names)
	AssociationForeignDBNames    []string // destination-side column names (join-table columns for many2many)
	JoinTableHandler             JoinTableHandlerInterface // set for many_to_many relationships
}
+
+func getForeignField(column string, fields []*StructField) *StructField {
+       for _, field := range fields {
+               if field.Name == column || field.DBName == column || field.DBName == ToColumnName(column) {
+                       return field
+               }
+       }
+       return nil
+}
+
+// GetModelStruct get value's model struct, relationships based on struct and tag definition
+func (scope *Scope) GetModelStruct() *ModelStruct {
+       var modelStruct ModelStruct
+       // Scope value can't be nil
+       if scope.Value == nil {
+               return &modelStruct
+       }
+
+       reflectType := reflect.ValueOf(scope.Value).Type()
+       for reflectType.Kind() == reflect.Slice || reflectType.Kind() == reflect.Ptr {
+               reflectType = reflectType.Elem()
+       }
+
+       // Scope value need to be a struct
+       if reflectType.Kind() != reflect.Struct {
+               return &modelStruct
+       }
+
+       // Get Cached model struct
+       if value, ok := modelStructsMap.Load(reflectType); ok && value != nil {
+               return value.(*ModelStruct)
+       }
+
+       modelStruct.ModelType = reflectType
+
+       // Get all fields
+       for i := 0; i < reflectType.NumField(); i++ {
+               if fieldStruct := reflectType.Field(i); ast.IsExported(fieldStruct.Name) {
+                       field := &StructField{
+                               Struct:      fieldStruct,
+                               Name:        fieldStruct.Name,
+                               Names:       []string{fieldStruct.Name},
+                               Tag:         fieldStruct.Tag,
+                               TagSettings: parseTagSetting(fieldStruct.Tag),
+                       }
+
+                       // is ignored field
+                       if _, ok := field.TagSettingsGet("-"); ok {
+                               field.IsIgnored = true
+                       } else {
+                               if _, ok := field.TagSettingsGet("PRIMARY_KEY"); ok {
+                                       field.IsPrimaryKey = true
+                                       modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
+                               }
+
+                               if _, ok := field.TagSettingsGet("DEFAULT"); ok {
+                                       field.HasDefaultValue = true
+                               }
+
+                               if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok && !field.IsPrimaryKey {
+                                       field.HasDefaultValue = true
+                               }
+
+                               indirectType := fieldStruct.Type
+                               for indirectType.Kind() == reflect.Ptr {
+                                       indirectType = indirectType.Elem()
+                               }
+
+                               fieldValue := reflect.New(indirectType).Interface()
+                               if _, isScanner := fieldValue.(sql.Scanner); isScanner {
+                                       // is scanner
+                                       field.IsScanner, field.IsNormal = true, true
+                                       if indirectType.Kind() == reflect.Struct {
+                                               for i := 0; i < indirectType.NumField(); i++ {
+                                                       for key, value := range parseTagSetting(indirectType.Field(i).Tag) {
+                                                               if _, ok := field.TagSettingsGet(key); !ok {
+                                                                       field.TagSettingsSet(key, value)
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               } else if _, isTime := fieldValue.(*time.Time); isTime {
+                                       // is time
+                                       field.IsNormal = true
+                               } else if _, ok := field.TagSettingsGet("EMBEDDED"); ok || fieldStruct.Anonymous {
+                                       // is embedded struct
+                                       for _, subField := range scope.New(fieldValue).GetModelStruct().StructFields {
+                                               subField = subField.clone()
+                                               subField.Names = append([]string{fieldStruct.Name}, subField.Names...)
+                                               if prefix, ok := field.TagSettingsGet("EMBEDDED_PREFIX"); ok {
+                                                       subField.DBName = prefix + subField.DBName
+                                               }
+
+                                               if subField.IsPrimaryKey {
+                                                       if _, ok := subField.TagSettingsGet("PRIMARY_KEY"); ok {
+                                                               modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, subField)
+                                                       } else {
+                                                               subField.IsPrimaryKey = false
+                                                       }
+                                               }
+
+                                               if subField.Relationship != nil && subField.Relationship.JoinTableHandler != nil {
+                                                       if joinTableHandler, ok := subField.Relationship.JoinTableHandler.(*JoinTableHandler); ok {
+                                                               newJoinTableHandler := &JoinTableHandler{}
+                                                               newJoinTableHandler.Setup(subField.Relationship, joinTableHandler.TableName, reflectType, joinTableHandler.Destination.ModelType)
+                                                               subField.Relationship.JoinTableHandler = newJoinTableHandler
+                                                       }
+                                               }
+
+                                               modelStruct.StructFields = append(modelStruct.StructFields, subField)
+                                       }
+                                       continue
+                               } else {
+                                       // build relationships
+                                       switch indirectType.Kind() {
+                                       case reflect.Slice:
+                                               defer func(field *StructField) {
+                                                       var (
+                                                               relationship           = &Relationship{}
+                                                               toScope                = scope.New(reflect.New(field.Struct.Type).Interface())
+                                                               foreignKeys            []string
+                                                               associationForeignKeys []string
+                                                               elemType               = field.Struct.Type
+                                                       )
+
+                                                       if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
+                                                               foreignKeys = strings.Split(foreignKey, ",")
+                                                       }
+
+                                                       if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
+                                                               associationForeignKeys = strings.Split(foreignKey, ",")
+                                                       } else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
+                                                               associationForeignKeys = strings.Split(foreignKey, ",")
+                                                       }
+
+                                                       for elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr {
+                                                               elemType = elemType.Elem()
+                                                       }
+
+                                                       if elemType.Kind() == reflect.Struct {
+                                                               if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
+                                                                       relationship.Kind = "many_to_many"
+
+                                                                       { // Foreign Keys for Source
+                                                                               joinTableDBNames := []string{}
+
+                                                                               if foreignKey, _ := field.TagSettingsGet("JOINTABLE_FOREIGNKEY"); foreignKey != "" {
+                                                                                       joinTableDBNames = strings.Split(foreignKey, ",")
+                                                                               }
+
+                                                                               // if no foreign keys defined with tag
+                                                                               if len(foreignKeys) == 0 {
+                                                                                       for _, field := range modelStruct.PrimaryFields {
+                                                                                               foreignKeys = append(foreignKeys, field.DBName)
+                                                                                       }
+                                                                               }
+
+                                                                               for idx, foreignKey := range foreignKeys {
+                                                                                       if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
+                                                                                               // source foreign keys (db names)
+                                                                                               relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.DBName)
+
+                                                                                               // setup join table foreign keys for source
+                                                                                               if len(joinTableDBNames) > idx {
+                                                                                                       // if defined join table's foreign key
+                                                                                                       relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBNames[idx])
+                                                                                               } else {
+                                                                                                       defaultJointableForeignKey := ToColumnName(reflectType.Name()) + "_" + foreignField.DBName
+                                                                                                       relationship.ForeignDBNames = append(relationship.ForeignDBNames, defaultJointableForeignKey)
+                                                                                               }
+                                                                                       }
+                                                                               }
+                                                                       }
+
+                                                                       { // Foreign Keys for Association (Destination)
+                                                                               associationJoinTableDBNames := []string{}
+
+                                                                               if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_JOINTABLE_FOREIGNKEY"); foreignKey != "" {
+                                                                                       associationJoinTableDBNames = strings.Split(foreignKey, ",")
+                                                                               }
+
+                                                                               // if no association foreign keys defined with tag
+                                                                               if len(associationForeignKeys) == 0 {
+                                                                                       for _, field := range toScope.PrimaryFields() {
+                                                                                               associationForeignKeys = append(associationForeignKeys, field.DBName)
+                                                                                       }
+                                                                               }
+
+                                                                               for idx, name := range associationForeignKeys {
+                                                                                       if field, ok := toScope.FieldByName(name); ok {
+                                                                                               // association foreign keys (db names)
+                                                                                               relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName)
+
+                                                                                               // setup join table foreign keys for association
+                                                                                               if len(associationJoinTableDBNames) > idx {
+                                                                                                       relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationJoinTableDBNames[idx])
+                                                                                               } else {
+                                                                                                       // join table foreign keys for association
+                                                                                                       joinTableDBName := ToColumnName(elemType.Name()) + "_" + field.DBName
+                                                                                                       relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName)
+                                                                                               }
+                                                                                       }
+                                                                               }
+                                                                       }
+
+                                                                       joinTableHandler := JoinTableHandler{}
+                                                                       joinTableHandler.Setup(relationship, ToTableName(many2many), reflectType, elemType)
+                                                                       relationship.JoinTableHandler = &joinTableHandler
+                                                                       field.Relationship = relationship
+                                                               } else {
+                                                                       // User has many comments, associationType is User, comment use UserID as foreign key
+                                                                       var associationType = reflectType.Name()
+                                                                       var toFields = toScope.GetStructFields()
+                                                                       relationship.Kind = "has_many"
+
+                                                                       if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
+                                                                               // Dog has many toys, tag polymorphic is Owner, then associationType is Owner
+                                                                               // Toy use OwnerID, OwnerType ('dogs') as foreign key
+                                                                               if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
+                                                                                       associationType = polymorphic
+                                                                                       relationship.PolymorphicType = polymorphicType.Name
+                                                                                       relationship.PolymorphicDBName = polymorphicType.DBName
+                                                                                       // if Dog has multiple set of toys set name of the set (instead of default 'dogs')
+                                                                                       if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
+                                                                                               relationship.PolymorphicValue = value
+                                                                                       } else {
+                                                                                               relationship.PolymorphicValue = scope.TableName()
+                                                                                       }
+                                                                                       polymorphicType.IsForeignKey = true
+                                                                               }
+                                                                       }
+
+                                                                       // if no foreign keys defined with tag
+                                                                       if len(foreignKeys) == 0 {
+                                                                               // if no association foreign keys defined with tag
+                                                                               if len(associationForeignKeys) == 0 {
+                                                                                       for _, field := range modelStruct.PrimaryFields {
+                                                                                               foreignKeys = append(foreignKeys, associationType+field.Name)
+                                                                                               associationForeignKeys = append(associationForeignKeys, field.Name)
+                                                                                       }
+                                                                               } else {
+                                                                                       // generate foreign keys from defined association foreign keys
+                                                                                       for _, scopeFieldName := range associationForeignKeys {
+                                                                                               if foreignField := getForeignField(scopeFieldName, modelStruct.StructFields); foreignField != nil {
+                                                                                                       foreignKeys = append(foreignKeys, associationType+foreignField.Name)
+                                                                                                       associationForeignKeys = append(associationForeignKeys, foreignField.Name)
+                                                                                               }
+                                                                                       }
+                                                                               }
+                                                                       } else {
+                                                                               // generate association foreign keys from foreign keys
+                                                                               if len(associationForeignKeys) == 0 {
+                                                                                       for _, foreignKey := range foreignKeys {
+                                                                                               if strings.HasPrefix(foreignKey, associationType) {
+                                                                                                       associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
+                                                                                                       if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
+                                                                                                               associationForeignKeys = append(associationForeignKeys, associationForeignKey)
+                                                                                                       }
+                                                                                               }
+                                                                                       }
+                                                                                       if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
+                                                                                               associationForeignKeys = []string{scope.PrimaryKey()}
+                                                                                       }
+                                                                               } else if len(foreignKeys) != len(associationForeignKeys) {
+                                                                                       scope.Err(errors.New("invalid foreign keys, should have same length"))
+                                                                                       return
+                                                                               }
+                                                                       }
+
+                                                                       for idx, foreignKey := range foreignKeys {
+                                                                               if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
+                                                                                       if associationField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); associationField != nil {
+                                                                                               // source foreign keys
+                                                                                               foreignField.IsForeignKey = true
+                                                                                               relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
+                                                                                               relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
+
+                                                                                               // association foreign keys
+                                                                                               relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
+                                                                                               relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
+                                                                                       }
+                                                                               }
+                                                                       }
+
+                                                                       if len(relationship.ForeignFieldNames) != 0 {
+                                                                               field.Relationship = relationship
+                                                                       }
+                                                               }
+                                                       } else {
+                                                               field.IsNormal = true
+                                                       }
+                                               }(field)
+                                       case reflect.Struct:
+                                               defer func(field *StructField) {
+                                                       var (
+                                                               // user has one profile, associationType is User, profile use UserID as foreign key
+                                                               // user belongs to profile, associationType is Profile, user use ProfileID as foreign key
+                                                               associationType           = reflectType.Name()
+                                                               relationship              = &Relationship{}
+                                                               toScope                   = scope.New(reflect.New(field.Struct.Type).Interface())
+                                                               toFields                  = toScope.GetStructFields()
+                                                               tagForeignKeys            []string
+                                                               tagAssociationForeignKeys []string
+                                                       )
+
+                                                       if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
+                                                               tagForeignKeys = strings.Split(foreignKey, ",")
+                                                       }
+
+                                                       if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
+                                                               tagAssociationForeignKeys = strings.Split(foreignKey, ",")
+                                                       } else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
+                                                               tagAssociationForeignKeys = strings.Split(foreignKey, ",")
+                                                       }
+
+                                                       if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
+                                                               // Cat has one toy, tag polymorphic is Owner, then associationType is Owner
+                                                               // Toy use OwnerID, OwnerType ('cats') as foreign key
+                                                               if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
+                                                                       associationType = polymorphic
+                                                                       relationship.PolymorphicType = polymorphicType.Name
+                                                                       relationship.PolymorphicDBName = polymorphicType.DBName
+                                                                       // if Cat has several different types of toys set name for each (instead of default 'cats')
+                                                                       if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
+                                                                               relationship.PolymorphicValue = value
+                                                                       } else {
+                                                                               relationship.PolymorphicValue = scope.TableName()
+                                                                       }
+                                                                       polymorphicType.IsForeignKey = true
+                                                               }
+                                                       }
+
+                                                       // Has One
+                                                       {
+                                                               var foreignKeys = tagForeignKeys
+                                                               var associationForeignKeys = tagAssociationForeignKeys
+                                                               // if no foreign keys defined with tag
+                                                               if len(foreignKeys) == 0 {
+                                                                       // if no association foreign keys defined with tag
+                                                                       if len(associationForeignKeys) == 0 {
+                                                                               for _, primaryField := range modelStruct.PrimaryFields {
+                                                                                       foreignKeys = append(foreignKeys, associationType+primaryField.Name)
+                                                                                       associationForeignKeys = append(associationForeignKeys, primaryField.Name)
+                                                                               }
+                                                                       } else {
+                                                                               // generate foreign keys form association foreign keys
+                                                                               for _, associationForeignKey := range tagAssociationForeignKeys {
+                                                                                       if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
+                                                                                               foreignKeys = append(foreignKeys, associationType+foreignField.Name)
+                                                                                               associationForeignKeys = append(associationForeignKeys, foreignField.Name)
+                                                                                       }
+                                                                               }
+                                                                       }
+                                                               } else {
+                                                                       // generate association foreign keys from foreign keys
+                                                                       if len(associationForeignKeys) == 0 {
+                                                                               for _, foreignKey := range foreignKeys {
+                                                                                       if strings.HasPrefix(foreignKey, associationType) {
+                                                                                               associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
+                                                                                               if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
+                                                                                                       associationForeignKeys = append(associationForeignKeys, associationForeignKey)
+                                                                                               }
+                                                                                       }
+                                                                               }
+                                                                               if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
+                                                                                       associationForeignKeys = []string{scope.PrimaryKey()}
+                                                                               }
+                                                                       } else if len(foreignKeys) != len(associationForeignKeys) {
+                                                                               scope.Err(errors.New("invalid foreign keys, should have same length"))
+                                                                               return
+                                                                       }
+                                                               }
+
+                                                               for idx, foreignKey := range foreignKeys {
+                                                                       if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
+                                                                               if scopeField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); scopeField != nil {
+                                                                                       foreignField.IsForeignKey = true
+                                                                                       // source foreign keys
+                                                                                       relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scopeField.Name)
+                                                                                       relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scopeField.DBName)
+
+                                                                                       // association foreign keys
+                                                                                       relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
+                                                                                       relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
+                                                                               }
+                                                                       }
+                                                               }
+                                                       }
+
+                                                       if len(relationship.ForeignFieldNames) != 0 {
+                                                               relationship.Kind = "has_one"
+                                                               field.Relationship = relationship
+                                                       } else {
+                                                               var foreignKeys = tagForeignKeys
+                                                               var associationForeignKeys = tagAssociationForeignKeys
+
+                                                               if len(foreignKeys) == 0 {
+                                                                       // generate foreign keys & association foreign keys
+                                                                       if len(associationForeignKeys) == 0 {
+                                                                               for _, primaryField := range toScope.PrimaryFields() {
+                                                                                       foreignKeys = append(foreignKeys, field.Name+primaryField.Name)
+                                                                                       associationForeignKeys = append(associationForeignKeys, primaryField.Name)
+                                                                               }
+                                                                       } else {
+                                                                               // generate foreign keys with association foreign keys
+                                                                               for _, associationForeignKey := range associationForeignKeys {
+                                                                                       if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
+                                                                                               foreignKeys = append(foreignKeys, field.Name+foreignField.Name)
+                                                                                               associationForeignKeys = append(associationForeignKeys, foreignField.Name)
+                                                                                       }
+                                                                               }
+                                                                       }
+                                                               } else {
+                                                                       // generate foreign keys & association foreign keys
+                                                                       if len(associationForeignKeys) == 0 {
+                                                                               for _, foreignKey := range foreignKeys {
+                                                                                       if strings.HasPrefix(foreignKey, field.Name) {
+                                                                                               associationForeignKey := strings.TrimPrefix(foreignKey, field.Name)
+                                                                                               if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
+                                                                                                       associationForeignKeys = append(associationForeignKeys, associationForeignKey)
+                                                                                               }
+                                                                                       }
+                                                                               }
+                                                                               if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
+                                                                                       associationForeignKeys = []string{toScope.PrimaryKey()}
+                                                                               }
+                                                                       } else if len(foreignKeys) != len(associationForeignKeys) {
+                                                                               scope.Err(errors.New("invalid foreign keys, should have same length"))
+                                                                               return
+                                                                       }
+                                                               }
+
+                                                               for idx, foreignKey := range foreignKeys {
+                                                                       if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
+                                                                               if associationField := getForeignField(associationForeignKeys[idx], toFields); associationField != nil {
+                                                                                       foreignField.IsForeignKey = true
+
+                                                                                       // association foreign keys
+                                                                                       relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
+                                                                                       relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
+
+                                                                                       // source foreign keys
+                                                                                       relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
+                                                                                       relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
+                                                                               }
+                                                                       }
+                                                               }
+
+                                                               if len(relationship.ForeignFieldNames) != 0 {
+                                                                       relationship.Kind = "belongs_to"
+                                                                       field.Relationship = relationship
+                                                               }
+                                                       }
+                                               }(field)
+                                       default:
+                                               field.IsNormal = true
+                                       }
+                               }
+                       }
+
+                       // Even it is ignored, also possible to decode db value into the field
+                       if value, ok := field.TagSettingsGet("COLUMN"); ok {
+                               field.DBName = value
+                       } else {
+                               field.DBName = ToColumnName(fieldStruct.Name)
+                       }
+
+                       modelStruct.StructFields = append(modelStruct.StructFields, field)
+               }
+       }
+
+       if len(modelStruct.PrimaryFields) == 0 {
+               if field := getForeignField("id", modelStruct.StructFields); field != nil {
+                       field.IsPrimaryKey = true
+                       modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
+               }
+       }
+
+       modelStructsMap.Store(reflectType, &modelStruct)
+
+       return &modelStruct
+}
+
+// GetStructFields returns the parsed field metadata for the scope's model.
+// It delegates to GetModelStruct, which caches results per reflect.Type,
+// so repeated calls are cheap.
+func (scope *Scope) GetStructFields() (fields []*StructField) {
+       return scope.GetModelStruct().StructFields
+}
+
+// parseTagSetting merges the "sql" and "gorm" struct tags into one map.
+// Each tag is a ';'-separated list of settings; a setting is either
+// "KEY:value" (the value may itself contain ':') or a bare flag "KEY",
+// which is stored as setting[KEY] = KEY. Keys are upper-cased and trimmed.
+// Because "gorm" is processed after "sql", a key present in both tags
+// takes its value from the "gorm" tag.
+func parseTagSetting(tags reflect.StructTag) map[string]string {
+       setting := map[string]string{}
+       for _, str := range []string{tags.Get("sql"), tags.Get("gorm")} {
+               // note: this inner "tags" shadows the StructTag parameter
+               tags := strings.Split(str, ";")
+               for _, value := range tags {
+                       v := strings.Split(value, ":")
+                       k := strings.TrimSpace(strings.ToUpper(v[0]))
+                       if len(v) >= 2 {
+                               // rejoin the remainder so values containing ':' survive the split
+                               setting[k] = strings.Join(v[1:], ":")
+                       } else {
+                               // bare flag: key maps to itself
+                               setting[k] = k
+                       }
+               }
+       }
+       return setting
+}
diff --git a/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go b/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go
new file mode 100755 (executable)
index 0000000..32a1477
--- /dev/null
@@ -0,0 +1,381 @@
+package gorm_test
+
+import (
+       "os"
+       "reflect"
+       "sort"
+       "testing"
+)
+
+// Blog is a test model with a composite primary key (ID, Locale), used to
+// exercise many2many associations over multi-column keys. The three tag
+// slices differ only in which columns the join table uses:
+// Tags joins on the full composite keys of both sides; SharedTags joins on
+// ID alone (ForeignKey:id), so blogs sharing an ID across locales share
+// tags; LocaleTags joins on (id, locale) against Tag's id.
+type Blog struct {
+       ID         uint   `gorm:"primary_key"`
+       Locale     string `gorm:"primary_key"`
+       Subject    string
+       Body       string
+       Tags       []Tag `gorm:"many2many:blog_tags;"`
+       SharedTags []Tag `gorm:"many2many:shared_blog_tags;ForeignKey:id;AssociationForeignKey:id"`
+       LocaleTags []Tag `gorm:"many2many:locale_blog_tags;ForeignKey:id,locale;AssociationForeignKey:id"`
+}
+
+// Tag is the association target for Blog's many2many tests; like Blog it
+// carries a composite primary key (ID, Locale).
+type Tag struct {
+       ID     uint   `gorm:"primary_key"`
+       Locale string `gorm:"primary_key"`
+       Value  string
+       Blogs  []*Blog `gorm:"many2many:blogs_tags"`
+}
+
+// compareTags reports whether the Value fields of tags equal contents,
+// ignoring order.
+// NOTE(review): sort.Strings sorts contents in place, so the caller's slice
+// is mutated; all call sites in this file pass slice literals, so that is
+// benign here, but beware if reusing this helper.
+func compareTags(tags []Tag, contents []string) bool {
+       var tagContents []string
+       for _, tag := range tags {
+               tagContents = append(tagContents, tag.Value)
+       }
+       sort.Strings(tagContents)
+       sort.Strings(contents)
+       return reflect.DeepEqual(tagContents, contents)
+}
+
+// TestManyToManyWithMultiPrimaryKeys exercises the default many2many
+// association (Blog.Tags) when both sides have a composite primary key
+// (ID, Locale): Save, Append, Count, Related, Preload, Replace, Delete and
+// Clear. The whole body is skipped when GORM_DIALECT is unset or is
+// "sqlite"/"mssql".
+func TestManyToManyWithMultiPrimaryKeys(t *testing.T) {
+       if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" && dialect != "mssql" {
+               // start from a clean schema, including the join table
+               DB.DropTable(&Blog{}, &Tag{})
+               DB.DropTable("blog_tags")
+               DB.CreateTable(&Blog{}, &Tag{})
+               blog := Blog{
+                       Locale:  "ZH",
+                       Subject: "subject",
+                       Body:    "body",
+                       Tags: []Tag{
+                               {Locale: "ZH", Value: "tag1"},
+                               {Locale: "ZH", Value: "tag2"},
+                       },
+               }
+
+               // Save should persist the blog and both nested tags
+               DB.Save(&blog)
+               if !compareTags(blog.Tags, []string{"tag1", "tag2"}) {
+                       t.Errorf("Blog should has two tags")
+               }
+
+               // Append
+               var tag3 = &Tag{Locale: "ZH", Value: "tag3"}
+               DB.Model(&blog).Association("Tags").Append([]*Tag{tag3})
+               if !compareTags(blog.Tags, []string{"tag1", "tag2", "tag3"}) {
+                       t.Errorf("Blog should has three tags after Append")
+               }
+
+               if DB.Model(&blog).Association("Tags").Count() != 3 {
+                       t.Errorf("Blog should has three tags after Append")
+               }
+
+               var tags []Tag
+               DB.Model(&blog).Related(&tags, "Tags")
+               if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+                       t.Errorf("Should find 3 tags with Related")
+               }
+
+               var blog1 Blog
+               DB.Preload("Tags").Find(&blog1)
+               if !compareTags(blog1.Tags, []string{"tag1", "tag2", "tag3"}) {
+                       t.Errorf("Preload many2many relations")
+               }
+
+               // Replace swaps the full tag set for {tag5, tag6}
+               var tag5 = &Tag{Locale: "ZH", Value: "tag5"}
+               var tag6 = &Tag{Locale: "ZH", Value: "tag6"}
+               DB.Model(&blog).Association("Tags").Replace(tag5, tag6)
+               var tags2 []Tag
+               DB.Model(&blog).Related(&tags2, "Tags")
+               if !compareTags(tags2, []string{"tag5", "tag6"}) {
+                       t.Errorf("Should find 2 tags after Replace")
+               }
+
+               // NOTE(review): message says "three tags" but the assertion is count == 2
+               if DB.Model(&blog).Association("Tags").Count() != 2 {
+                       t.Errorf("Blog should has three tags after Replace")
+               }
+
+               // Delete
+               DB.Model(&blog).Association("Tags").Delete(tag5)
+               var tags3 []Tag
+               DB.Model(&blog).Related(&tags3, "Tags")
+               if !compareTags(tags3, []string{"tag6"}) {
+                       t.Errorf("Should find 1 tags after Delete")
+               }
+
+               // NOTE(review): message says "three tags" but the assertion is count == 1
+               if DB.Model(&blog).Association("Tags").Count() != 1 {
+                       t.Errorf("Blog should has three tags after Delete")
+               }
+
+               // deleting tag3, which was removed by Replace above, must be a no-op
+               DB.Model(&blog).Association("Tags").Delete(tag3)
+               var tags4 []Tag
+               DB.Model(&blog).Related(&tags4, "Tags")
+               if !compareTags(tags4, []string{"tag6"}) {
+                       t.Errorf("Tag should not be deleted when Delete with a unrelated tag")
+               }
+
+               // Clear
+               DB.Model(&blog).Association("Tags").Clear()
+               if DB.Model(&blog).Association("Tags").Count() != 0 {
+                       t.Errorf("All tags should be cleared")
+               }
+       }
+}
+
+// TestManyToManyWithCustomizedForeignKeys exercises Blog.SharedTags, whose
+// join table keys on ID only (ForeignKey:id). Two blogs with the same ID but
+// different Locales therefore share one tag set: mutations through either
+// blog must be visible through the other. Skipped when GORM_DIALECT is
+// unset or is "sqlite"/"mssql".
+func TestManyToManyWithCustomizedForeignKeys(t *testing.T) {
+       if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" && dialect != "mssql" {
+               // start from a clean schema, including the join table
+               DB.DropTable(&Blog{}, &Tag{})
+               DB.DropTable("shared_blog_tags")
+               DB.CreateTable(&Blog{}, &Tag{})
+               blog := Blog{
+                       Locale:  "ZH",
+                       Subject: "subject",
+                       Body:    "body",
+                       SharedTags: []Tag{
+                               {Locale: "ZH", Value: "tag1"},
+                               {Locale: "ZH", Value: "tag2"},
+                       },
+               }
+               DB.Save(&blog)
+
+               // second blog with the same ID but a different Locale: because the
+               // join keys on ID alone, it shares blog's tag set
+               blog2 := Blog{
+                       ID:     blog.ID,
+                       Locale: "EN",
+               }
+               DB.Create(&blog2)
+
+               if !compareTags(blog.SharedTags, []string{"tag1", "tag2"}) {
+                       t.Errorf("Blog should has two tags")
+               }
+
+               // Append
+               var tag3 = &Tag{Locale: "ZH", Value: "tag3"}
+               DB.Model(&blog).Association("SharedTags").Append([]*Tag{tag3})
+               if !compareTags(blog.SharedTags, []string{"tag1", "tag2", "tag3"}) {
+                       t.Errorf("Blog should has three tags after Append")
+               }
+
+               if DB.Model(&blog).Association("SharedTags").Count() != 3 {
+                       t.Errorf("Blog should has three tags after Append")
+               }
+
+               // an append through blog must be visible through blog2
+               if DB.Model(&blog2).Association("SharedTags").Count() != 3 {
+                       t.Errorf("Blog should has three tags after Append")
+               }
+
+               var tags []Tag
+               DB.Model(&blog).Related(&tags, "SharedTags")
+               if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+                       t.Errorf("Should find 3 tags with Related")
+               }
+
+               DB.Model(&blog2).Related(&tags, "SharedTags")
+               if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+                       t.Errorf("Should find 3 tags with Related")
+               }
+
+               var blog1 Blog
+               DB.Preload("SharedTags").Find(&blog1)
+               if !compareTags(blog1.SharedTags, []string{"tag1", "tag2", "tag3"}) {
+                       t.Errorf("Preload many2many relations")
+               }
+
+               // appending through blog2 must likewise be visible through blog
+               var tag4 = &Tag{Locale: "ZH", Value: "tag4"}
+               DB.Model(&blog2).Association("SharedTags").Append(tag4)
+
+               // NOTE(review): the next two messages say "3 tags" but 4 are expected
+               DB.Model(&blog).Related(&tags, "SharedTags")
+               if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) {
+                       t.Errorf("Should find 3 tags with Related")
+               }
+
+               DB.Model(&blog2).Related(&tags, "SharedTags")
+               if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) {
+                       t.Errorf("Should find 3 tags with Related")
+               }
+
+               // Replace through blog2 swaps the shared set for {tag5, tag6}
+               var tag5 = &Tag{Locale: "ZH", Value: "tag5"}
+               var tag6 = &Tag{Locale: "ZH", Value: "tag6"}
+               DB.Model(&blog2).Association("SharedTags").Replace(tag5, tag6)
+               var tags2 []Tag
+               DB.Model(&blog).Related(&tags2, "SharedTags")
+               if !compareTags(tags2, []string{"tag5", "tag6"}) {
+                       t.Errorf("Should find 2 tags after Replace")
+               }
+
+               DB.Model(&blog2).Related(&tags2, "SharedTags")
+               if !compareTags(tags2, []string{"tag5", "tag6"}) {
+                       t.Errorf("Should find 2 tags after Replace")
+               }
+
+               // NOTE(review): message says "three tags" but the assertion is count == 2
+               if DB.Model(&blog).Association("SharedTags").Count() != 2 {
+                       t.Errorf("Blog should has three tags after Replace")
+               }
+
+               // Delete
+               DB.Model(&blog).Association("SharedTags").Delete(tag5)
+               var tags3 []Tag
+               DB.Model(&blog).Related(&tags3, "SharedTags")
+               if !compareTags(tags3, []string{"tag6"}) {
+                       t.Errorf("Should find 1 tags after Delete")
+               }
+
+               // NOTE(review): message says "three tags" but the assertion is count == 1
+               if DB.Model(&blog).Association("SharedTags").Count() != 1 {
+                       t.Errorf("Blog should has three tags after Delete")
+               }
+
+               // deleting tag3, removed by Replace above, must be a no-op
+               DB.Model(&blog2).Association("SharedTags").Delete(tag3)
+               var tags4 []Tag
+               DB.Model(&blog).Related(&tags4, "SharedTags")
+               if !compareTags(tags4, []string{"tag6"}) {
+                       t.Errorf("Tag should not be deleted when Delete with a unrelated tag")
+               }
+
+               // Clear through blog2 must clear blog's set too
+               DB.Model(&blog2).Association("SharedTags").Clear()
+               if DB.Model(&blog).Association("SharedTags").Count() != 0 {
+                       t.Errorf("All tags should be cleared")
+               }
+       }
+}
+
+// TestManyToManyWithCustomizedForeignKeys2 verifies that a many2many
+// association scoped by a composite key (id + locale) keeps the ZH and
+// EN blogs' tag sets independent through Append, Replace, Delete and
+// Clear. Skipped on sqlite and mssql, which the fixture does not support.
+//
+// Fix: the Related assertion on the ZH blog previously reported
+// "for EN Blog" in its failure message.
+func TestManyToManyWithCustomizedForeignKeys2(t *testing.T) {
+	if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" && dialect != "mssql" {
+		DB.DropTable(&Blog{}, &Tag{})
+		DB.DropTable("locale_blog_tags")
+		DB.CreateTable(&Blog{}, &Tag{})
+		blog := Blog{
+			Locale:  "ZH",
+			Subject: "subject",
+			Body:    "body",
+			LocaleTags: []Tag{
+				{Locale: "ZH", Value: "tag1"},
+				{Locale: "ZH", Value: "tag2"},
+			},
+		}
+		DB.Save(&blog)
+
+		// blog2 shares blog's ID but has a different locale, so the
+		// composite foreign key must keep their tag sets separate.
+		blog2 := Blog{
+			ID:     blog.ID,
+			Locale: "EN",
+		}
+		DB.Create(&blog2)
+
+		// Append
+		var tag3 = &Tag{Locale: "ZH", Value: "tag3"}
+		DB.Model(&blog).Association("LocaleTags").Append([]*Tag{tag3})
+		if !compareTags(blog.LocaleTags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Blog should has three tags after Append")
+		}
+
+		if DB.Model(&blog).Association("LocaleTags").Count() != 3 {
+			t.Errorf("Blog should has three tags after Append")
+		}
+
+		if DB.Model(&blog2).Association("LocaleTags").Count() != 0 {
+			t.Errorf("EN Blog should has 0 tags after ZH Blog Append")
+		}
+
+		var tags []Tag
+		DB.Model(&blog).Related(&tags, "LocaleTags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Should find 3 tags with Related")
+		}
+
+		DB.Model(&blog2).Related(&tags, "LocaleTags")
+		if len(tags) != 0 {
+			t.Errorf("Should find 0 tags with Related for EN Blog")
+		}
+
+		var blog1 Blog
+		DB.Preload("LocaleTags").Find(&blog1, "locale = ? AND id = ?", "ZH", blog.ID)
+		if !compareTags(blog1.LocaleTags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Preload many2many relations")
+		}
+
+		var tag4 = &Tag{Locale: "ZH", Value: "tag4"}
+		DB.Model(&blog2).Association("LocaleTags").Append(tag4)
+
+		// tag4 was appended to the EN blog only; ZH blog must be unchanged.
+		DB.Model(&blog).Related(&tags, "LocaleTags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Should find 3 tags with Related for ZH Blog")
+		}
+
+		DB.Model(&blog2).Related(&tags, "LocaleTags")
+		if !compareTags(tags, []string{"tag4"}) {
+			t.Errorf("Should find 1 tags with Related for EN Blog")
+		}
+
+		// Replace
+		var tag5 = &Tag{Locale: "ZH", Value: "tag5"}
+		var tag6 = &Tag{Locale: "ZH", Value: "tag6"}
+		DB.Model(&blog2).Association("LocaleTags").Replace(tag5, tag6)
+
+		var tags2 []Tag
+		DB.Model(&blog).Related(&tags2, "LocaleTags")
+		if !compareTags(tags2, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("CN Blog's tags should not be changed after EN Blog Replace")
+		}
+
+		var blog11 Blog
+		DB.Preload("LocaleTags").First(&blog11, "id = ? AND locale = ?", blog.ID, blog.Locale)
+		if !compareTags(blog11.LocaleTags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("CN Blog's tags should not be changed after EN Blog Replace")
+		}
+
+		DB.Model(&blog2).Related(&tags2, "LocaleTags")
+		if !compareTags(tags2, []string{"tag5", "tag6"}) {
+			t.Errorf("Should find 2 tags after Replace")
+		}
+
+		var blog21 Blog
+		DB.Preload("LocaleTags").First(&blog21, "id = ? AND locale = ?", blog2.ID, blog2.Locale)
+		if !compareTags(blog21.LocaleTags, []string{"tag5", "tag6"}) {
+			t.Errorf("EN Blog's tags should be changed after Replace")
+		}
+
+		if DB.Model(&blog).Association("LocaleTags").Count() != 3 {
+			t.Errorf("ZH Blog should has three tags after Replace")
+		}
+
+		if DB.Model(&blog2).Association("LocaleTags").Count() != 2 {
+			t.Errorf("EN Blog should has two tags after Replace")
+		}
+
+		// Delete: removing an EN-owned tag through the ZH blog must be a no-op.
+		DB.Model(&blog).Association("LocaleTags").Delete(tag5)
+
+		if DB.Model(&blog).Association("LocaleTags").Count() != 3 {
+			t.Errorf("ZH Blog should has three tags after Delete with EN's tag")
+		}
+
+		if DB.Model(&blog2).Association("LocaleTags").Count() != 2 {
+			t.Errorf("EN Blog should has two tags after ZH Blog Delete with EN's tag")
+		}
+
+		DB.Model(&blog2).Association("LocaleTags").Delete(tag5)
+
+		if DB.Model(&blog).Association("LocaleTags").Count() != 3 {
+			t.Errorf("ZH Blog should has three tags after EN Blog Delete with EN's tag")
+		}
+
+		if DB.Model(&blog2).Association("LocaleTags").Count() != 1 {
+			t.Errorf("EN Blog should has 1 tags after EN Blog Delete with EN's tag")
+		}
+
+		// Clear: clearing one locale must not touch the other.
+		DB.Model(&blog2).Association("LocaleTags").Clear()
+		if DB.Model(&blog).Association("LocaleTags").Count() != 3 {
+			t.Errorf("ZH Blog's tags should not be cleared when clear EN Blog's tags")
+		}
+
+		if DB.Model(&blog2).Association("LocaleTags").Count() != 0 {
+			t.Errorf("EN Blog's tags should be cleared when clear EN Blog's tags")
+		}
+
+		DB.Model(&blog).Association("LocaleTags").Clear()
+		if DB.Model(&blog).Association("LocaleTags").Count() != 0 {
+			t.Errorf("ZH Blog's tags should be cleared when clear ZH Blog's tags")
+		}
+
+		if DB.Model(&blog2).Association("LocaleTags").Count() != 0 {
+			t.Errorf("EN Blog's tags should be cleared")
+		}
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/naming.go b/vendor/github.com/jinzhu/gorm/naming.go
new file mode 100755 (executable)
index 0000000..6b0a4fd
--- /dev/null
@@ -0,0 +1,124 @@
+package gorm
+
+import (
+       "bytes"
+       "strings"
+)
+
+// Namer is a function type which maps an identifier (for example a Go
+// struct or field name) to the name actually used in the database.
+type Namer func(string) string
+
+// NamingStrategy bundles the namers applied to database, table and
+// column names respectively.
+type NamingStrategy struct {
+	DB     Namer
+	Table  Namer
+	Column Namer
+}
+
+// TheNamingStrategy is the package-wide strategy. Every namer starts as
+// defaultNamer; callers swap the whole strategy via AddNamingStrategy.
+var TheNamingStrategy = &NamingStrategy{
+	DB:     defaultNamer,
+	Table:  defaultNamer,
+	Column: defaultNamer,
+}
+
+// AddNamingStrategy sets the naming strategy
+func AddNamingStrategy(ns *NamingStrategy) {
+       if ns.DB == nil {
+               ns.DB = defaultNamer
+       }
+       if ns.Table == nil {
+               ns.Table = defaultNamer
+       }
+       if ns.Column == nil {
+               ns.Column = defaultNamer
+       }
+       TheNamingStrategy = ns
+}
+
+// DBName applies the strategy's DB namer to name.
+func (ns *NamingStrategy) DBName(name string) string {
+	return ns.DB(name)
+}
+
+// TableName applies the strategy's Table namer to name.
+func (ns *NamingStrategy) TableName(name string) string {
+	return ns.Table(name)
+}
+
+// ColumnName applies the strategy's Column namer to name.
+func (ns *NamingStrategy) ColumnName(name string) string {
+	return ns.Column(name)
+}
+
+// ToDBName converts name to a database name using TheNamingStrategy.
+func ToDBName(name string) string {
+	return TheNamingStrategy.DBName(name)
+}
+
+// ToTableName converts name to a table name using TheNamingStrategy.
+func ToTableName(name string) string {
+	return TheNamingStrategy.TableName(name)
+}
+
+// ToColumnName converts name to a column name using TheNamingStrategy.
+// (The original comment incorrectly said "db name".)
+func ToColumnName(name string) string {
+	return TheNamingStrategy.ColumnName(name)
+}
+
+// smap caches name -> converted-name results so repeated conversions of
+// the same identifier skip the character walk in defaultNamer.
+var smap = newSafeMap()
+
+// defaultNamer converts a mixed-case Go identifier into a lower-case,
+// underscore-separated database name (e.g. "UserRestrictions" ->
+// "user_restrictions"). Common initialisms are normalized first via
+// commonInitialismsReplacer, and results are memoized in smap.
+//
+// NOTE(review): the loop ranges over runes but peeks ahead with byte
+// indexing (value[i+1]); this assumes the replaced name is ASCII —
+// verify for non-ASCII field names.
+func defaultNamer(name string) string {
+	const (
+		// readable aliases for the case-tracking booleans below
+		lower = false
+		upper = true
+	)
+
+	// Fast path: previously converted name.
+	if v := smap.Get(name); v != "" {
+		return v
+	}
+
+	if name == "" {
+		return ""
+	}
+
+	var (
+		value                                    = commonInitialismsReplacer.Replace(name)
+		buf                                      = bytes.NewBufferString("")
+		lastCase, currCase, nextCase, nextNumber bool
+	)
+
+	// Walk all but the last character, inserting '_' at case boundaries
+	// while keeping runs of upper-case (initialisms) together.
+	for i, v := range value[:len(value)-1] {
+		nextCase = bool(value[i+1] >= 'A' && value[i+1] <= 'Z')
+		nextNumber = bool(value[i+1] >= '0' && value[i+1] <= '9')
+
+		if i > 0 {
+			if currCase == upper {
+				if lastCase == upper && (nextCase == upper || nextNumber == upper) {
+					buf.WriteRune(v)
+				} else {
+					// Boundary into/out of an upper run: separate with '_'
+					// unless an underscore is already adjacent.
+					if value[i-1] != '_' && value[i+1] != '_' {
+						buf.WriteRune('_')
+					}
+					buf.WriteRune(v)
+				}
+			} else {
+				buf.WriteRune(v)
+				// Split before a single trailing capital (e.g. "nameX").
+				if i == len(value)-2 && (nextCase == upper && nextNumber == lower) {
+					buf.WriteRune('_')
+				}
+			}
+		} else {
+			// First character is treated as upper so no leading '_' appears.
+			currCase = upper
+			buf.WriteRune(v)
+		}
+		lastCase = currCase
+		currCase = nextCase
+	}
+
+	buf.WriteByte(value[len(value)-1])
+
+	s := strings.ToLower(buf.String())
+	smap.Set(name, s)
+	return s
+}
diff --git a/vendor/github.com/jinzhu/gorm/naming_test.go b/vendor/github.com/jinzhu/gorm/naming_test.go
new file mode 100755 (executable)
index 0000000..0c6f771
--- /dev/null
@@ -0,0 +1,69 @@
+package gorm_test
+
+import (
+       "testing"
+
+       "github.com/jinzhu/gorm"
+)
+
+func TestTheNamingStrategy(t *testing.T) {
+
+       cases := []struct {
+               name     string
+               namer    gorm.Namer
+               expected string
+       }{
+               {name: "auth", expected: "auth", namer: gorm.TheNamingStrategy.DB},
+               {name: "userRestrictions", expected: "user_restrictions", namer: gorm.TheNamingStrategy.Table},
+               {name: "clientID", expected: "client_id", namer: gorm.TheNamingStrategy.Column},
+       }
+
+       for _, c := range cases {
+               t.Run(c.name, func(t *testing.T) {
+                       result := c.namer(c.name)
+                       if result != c.expected {
+                               t.Errorf("error in naming strategy. expected: %v got :%v\n", c.expected, result)
+                       }
+               })
+       }
+
+}
+
+func TestNamingStrategy(t *testing.T) {
+
+       dbNameNS := func(name string) string {
+               return "db_" + name
+       }
+       tableNameNS := func(name string) string {
+               return "tbl_" + name
+       }
+       columnNameNS := func(name string) string {
+               return "col_" + name
+       }
+
+       ns := &gorm.NamingStrategy{
+               DB:     dbNameNS,
+               Table:  tableNameNS,
+               Column: columnNameNS,
+       }
+
+       cases := []struct {
+               name     string
+               namer    gorm.Namer
+               expected string
+       }{
+               {name: "auth", expected: "db_auth", namer: ns.DB},
+               {name: "user", expected: "tbl_user", namer: ns.Table},
+               {name: "password", expected: "col_password", namer: ns.Column},
+       }
+
+       for _, c := range cases {
+               t.Run(c.name, func(t *testing.T) {
+                       result := c.namer(c.name)
+                       if result != c.expected {
+                               t.Errorf("error in naming strategy. expected: %v got :%v\n", c.expected, result)
+                       }
+               })
+       }
+
+}
diff --git a/vendor/github.com/jinzhu/gorm/pointer_test.go b/vendor/github.com/jinzhu/gorm/pointer_test.go
new file mode 100755 (executable)
index 0000000..2a68a5a
--- /dev/null
@@ -0,0 +1,84 @@
+package gorm_test
+
+import "testing"
+
+// PointerStruct uses pointer fields so that NULL columns round-trip as
+// nil instead of the type's zero value.
+type PointerStruct struct {
+	ID   int64
+	Name *string
+	Num  *int
+}
+
+// NormalStruct mirrors PointerStruct with value fields; tests use it to
+// read the same table without pointer indirection.
+type NormalStruct struct {
+	ID   int64
+	Name string
+	Num  int
+}
+
+// TestPointerFields verifies that pointer fields save and load
+// correctly — fully populated, fully nil, and partially nil — and that
+// the same rows can be read back through the value-typed NormalStruct.
+func TestPointerFields(t *testing.T) {
+	DB.DropTable(&PointerStruct{})
+	DB.AutoMigrate(&PointerStruct{})
+	var name = "pointer struct 1"
+	var num = 100
+	// Fully populated pointer struct: both pointers set.
+	pointerStruct := PointerStruct{Name: &name, Num: &num}
+	if DB.Create(&pointerStruct).Error != nil {
+		t.Errorf("Failed to save pointer struct")
+	}
+
+	var pointerStructResult PointerStruct
+	if err := DB.First(&pointerStructResult, "id = ?", pointerStruct.ID).Error; err != nil || *pointerStructResult.Name != name || *pointerStructResult.Num != num {
+		t.Errorf("Failed to query saved pointer struct")
+	}
+
+	var tableName = DB.NewScope(&PointerStruct{}).TableName()
+
+	// The same row must be readable via the value-typed mirror struct.
+	var normalStruct NormalStruct
+	DB.Table(tableName).First(&normalStruct)
+	if normalStruct.Name != name || normalStruct.Num != num {
+		t.Errorf("Failed to query saved Normal struct")
+	}
+
+	// Both pointers nil: the row stores NULLs and must still round-trip.
+	var nilPointerStruct = PointerStruct{}
+	if err := DB.Create(&nilPointerStruct).Error; err != nil {
+		t.Error("Failed to save nil pointer struct", err)
+	}
+
+	var pointerStruct2 PointerStruct
+	if err := DB.First(&pointerStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil {
+		t.Error("Failed to query saved nil pointer struct", err)
+	}
+
+	var normalStruct2 NormalStruct
+	if err := DB.Table(tableName).First(&normalStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil {
+		t.Error("Failed to query saved nil pointer struct", err)
+	}
+
+	// Partially nil: only Num set.
+	var partialNilPointerStruct1 = PointerStruct{Num: &num}
+	if err := DB.Create(&partialNilPointerStruct1).Error; err != nil {
+		t.Error("Failed to save partial nil pointer struct", err)
+	}
+
+	var pointerStruct3 PointerStruct
+	if err := DB.First(&pointerStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || *pointerStruct3.Num != num {
+		t.Error("Failed to query saved partial nil pointer struct", err)
+	}
+
+	var normalStruct3 NormalStruct
+	if err := DB.Table(tableName).First(&normalStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || normalStruct3.Num != num {
+		t.Error("Failed to query saved partial pointer struct", err)
+	}
+
+	// Partially nil: only Name set.
+	var partialNilPointerStruct2 = PointerStruct{Name: &name}
+	if err := DB.Create(&partialNilPointerStruct2).Error; err != nil {
+		t.Error("Failed to save partial nil pointer struct", err)
+	}
+
+	var pointerStruct4 PointerStruct
+	if err := DB.First(&pointerStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || *pointerStruct4.Name != name {
+		t.Error("Failed to query saved partial nil pointer struct", err)
+	}
+
+	var normalStruct4 NormalStruct
+	if err := DB.Table(tableName).First(&normalStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || normalStruct4.Name != name {
+		t.Error("Failed to query saved partial pointer struct", err)
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/polymorphic_test.go b/vendor/github.com/jinzhu/gorm/polymorphic_test.go
new file mode 100755 (executable)
index 0000000..d1ecfbb
--- /dev/null
@@ -0,0 +1,366 @@
+package gorm_test
+
+import (
+       "reflect"
+       "sort"
+       "testing"
+)
+
+// Cat has one Toy through the polymorphic Owner association
+// (owner_id/owner_type columns on Toy).
+type Cat struct {
+	Id   int
+	Name string
+	Toy  Toy `gorm:"polymorphic:Owner;"`
+}
+
+// Dog has many Toys through the same polymorphic Owner association.
+type Dog struct {
+	Id   int
+	Name string
+	Toys []Toy `gorm:"polymorphic:Owner;"`
+}
+
+// Hamster owns two distinct has-one Toy associations, disambiguated by
+// custom polymorphic_value tags stored in Toy.OwnerType.
+type Hamster struct {
+	Id           int
+	Name         string
+	PreferredToy Toy `gorm:"polymorphic:Owner;polymorphic_value:hamster_preferred"`
+	OtherToy     Toy `gorm:"polymorphic:Owner;polymorphic_value:hamster_other"`
+}
+
+// Toy is the polymorphic child: OwnerId and OwnerType together identify
+// the owning record and its kind.
+type Toy struct {
+	Id        int
+	Name      string
+	OwnerId   int
+	OwnerType string
+}
+
+// compareToys reports whether the names of toys equal contents,
+// ignoring order. It sorts a copy of contents so the caller's slice is
+// not mutated (the original sorted the argument in place — a surprising
+// side effect on callers passing shared slices).
+var compareToys = func(toys []Toy, contents []string) bool {
+	var toyContents []string
+	for _, toy := range toys {
+		toyContents = append(toyContents, toy.Name)
+	}
+	// Copy before sorting; nil stays nil so DeepEqual semantics for the
+	// empty case are unchanged.
+	expected := append([]string(nil), contents...)
+	sort.Strings(toyContents)
+	sort.Strings(expected)
+	return reflect.DeepEqual(toyContents, expected)
+}
+
+// TestPolymorphic walks a has-one (Cat.Toy) and a has-many (Dog.Toys)
+// polymorphic association through Query, Append, Replace, Delete and
+// Clear, checking that the two owners never see each other's toys.
+//
+// Fix: the count assertion after Replace expects four toys but its
+// failure message previously said "three".
+func TestPolymorphic(t *testing.T) {
+	cat := Cat{Name: "Mr. Bigglesworth", Toy: Toy{Name: "cat toy"}}
+	dog := Dog{Name: "Pluto", Toys: []Toy{{Name: "dog toy 1"}, {Name: "dog toy 2"}}}
+	DB.Save(&cat).Save(&dog)
+
+	if DB.Model(&cat).Association("Toy").Count() != 1 {
+		t.Errorf("Cat's toys count should be 1")
+	}
+
+	if DB.Model(&dog).Association("Toys").Count() != 2 {
+		t.Errorf("Dog's toys count should be 2")
+	}
+
+	// Query
+	var catToys []Toy
+	if DB.Model(&cat).Related(&catToys, "Toy").RecordNotFound() {
+		t.Errorf("Did not find any has one polymorphic association")
+	} else if len(catToys) != 1 {
+		t.Errorf("Should have found only one polymorphic has one association")
+	} else if catToys[0].Name != cat.Toy.Name {
+		t.Errorf("Should have found the proper has one polymorphic association")
+	}
+
+	var dogToys []Toy
+	if DB.Model(&dog).Related(&dogToys, "Toys").RecordNotFound() {
+		t.Errorf("Did not find any polymorphic has many associations")
+	} else if len(dogToys) != len(dog.Toys) {
+		t.Errorf("Should have found all polymorphic has many associations")
+	}
+
+	var catToy Toy
+	DB.Model(&cat).Association("Toy").Find(&catToy)
+	if catToy.Name != cat.Toy.Name {
+		t.Errorf("Should find has one polymorphic association")
+	}
+
+	var dogToys1 []Toy
+	DB.Model(&dog).Association("Toys").Find(&dogToys1)
+	if !compareToys(dogToys1, []string{"dog toy 1", "dog toy 2"}) {
+		t.Errorf("Should find has many polymorphic association")
+	}
+
+	// Append: on a has-one it replaces; on a has-many it adds.
+	DB.Model(&cat).Association("Toy").Append(&Toy{
+		Name: "cat toy 2",
+	})
+
+	var catToy2 Toy
+	DB.Model(&cat).Association("Toy").Find(&catToy2)
+	if catToy2.Name != "cat toy 2" {
+		t.Errorf("Should update has one polymorphic association with Append")
+	}
+
+	if DB.Model(&cat).Association("Toy").Count() != 1 {
+		t.Errorf("Cat's toys count should be 1 after Append")
+	}
+
+	if DB.Model(&dog).Association("Toys").Count() != 2 {
+		t.Errorf("Should return two polymorphic has many associations")
+	}
+
+	DB.Model(&dog).Association("Toys").Append(&Toy{
+		Name: "dog toy 3",
+	})
+
+	var dogToys2 []Toy
+	DB.Model(&dog).Association("Toys").Find(&dogToys2)
+	if !compareToys(dogToys2, []string{"dog toy 1", "dog toy 2", "dog toy 3"}) {
+		t.Errorf("Dog's toys should be updated with Append")
+	}
+
+	if DB.Model(&dog).Association("Toys").Count() != 3 {
+		t.Errorf("Should return three polymorphic has many associations")
+	}
+
+	// Replace
+	DB.Model(&cat).Association("Toy").Replace(&Toy{
+		Name: "cat toy 3",
+	})
+
+	var catToy3 Toy
+	DB.Model(&cat).Association("Toy").Find(&catToy3)
+	if catToy3.Name != "cat toy 3" {
+		t.Errorf("Should update has one polymorphic association with Replace")
+	}
+
+	if DB.Model(&cat).Association("Toy").Count() != 1 {
+		t.Errorf("Cat's toys count should be 1 after Replace")
+	}
+
+	if DB.Model(&dog).Association("Toys").Count() != 3 {
+		t.Errorf("Should return three polymorphic has many associations")
+	}
+
+	DB.Model(&dog).Association("Toys").Replace(&Toy{
+		Name: "dog toy 4",
+	}, []Toy{
+		{Name: "dog toy 5"}, {Name: "dog toy 6"}, {Name: "dog toy 7"},
+	})
+
+	var dogToys3 []Toy
+	DB.Model(&dog).Association("Toys").Find(&dogToys3)
+	if !compareToys(dogToys3, []string{"dog toy 4", "dog toy 5", "dog toy 6", "dog toy 7"}) {
+		t.Errorf("Dog's toys should be updated with Replace")
+	}
+
+	if DB.Model(&dog).Association("Toys").Count() != 4 {
+		t.Errorf("Should return four polymorphic has many associations")
+	}
+
+	// Delete: deleting an unrelated toy must be a no-op.
+	DB.Model(&cat).Association("Toy").Delete(&catToy2)
+
+	var catToy4 Toy
+	DB.Model(&cat).Association("Toy").Find(&catToy4)
+	if catToy4.Name != "cat toy 3" {
+		t.Errorf("Should not update has one polymorphic association when Delete a unrelated Toy")
+	}
+
+	if DB.Model(&cat).Association("Toy").Count() != 1 {
+		t.Errorf("Cat's toys count should be 1")
+	}
+
+	if DB.Model(&dog).Association("Toys").Count() != 4 {
+		t.Errorf("Dog's toys count should be 4")
+	}
+
+	DB.Model(&cat).Association("Toy").Delete(&catToy3)
+
+	if !DB.Model(&cat).Related(&Toy{}, "Toy").RecordNotFound() {
+		t.Errorf("Toy should be deleted with Delete")
+	}
+
+	if DB.Model(&cat).Association("Toy").Count() != 0 {
+		t.Errorf("Cat's toys count should be 0 after Delete")
+	}
+
+	if DB.Model(&dog).Association("Toys").Count() != 4 {
+		t.Errorf("Dog's toys count should not be changed when delete cat's toy")
+	}
+
+	DB.Model(&dog).Association("Toys").Delete(&dogToys2)
+
+	if DB.Model(&dog).Association("Toys").Count() != 4 {
+		t.Errorf("Dog's toys count should not be changed when delete unrelated toys")
+	}
+
+	DB.Model(&dog).Association("Toys").Delete(&dogToys3)
+
+	if DB.Model(&dog).Association("Toys").Count() != 0 {
+		t.Errorf("Dog's toys count should be deleted with Delete")
+	}
+
+	// Clear
+	DB.Model(&cat).Association("Toy").Append(&Toy{
+		Name: "cat toy 2",
+	})
+
+	if DB.Model(&cat).Association("Toy").Count() != 1 {
+		t.Errorf("Cat's toys should be added with Append")
+	}
+
+	DB.Model(&cat).Association("Toy").Clear()
+
+	if DB.Model(&cat).Association("Toy").Count() != 0 {
+		t.Errorf("Cat's toys should be cleared with Clear")
+	}
+
+	DB.Model(&dog).Association("Toys").Append(&Toy{
+		Name: "dog toy 8",
+	})
+
+	if DB.Model(&dog).Association("Toys").Count() != 1 {
+		t.Errorf("Dog's toys should be added with Append")
+	}
+
+	DB.Model(&dog).Association("Toys").Clear()
+
+	if DB.Model(&dog).Association("Toys").Count() != 0 {
+		t.Errorf("Dog's toys should be cleared with Clear")
+	}
+}
+
+// TestNamedPolymorphic exercises two has-one associations on the same
+// owner (Hamster) that are kept apart by custom polymorphic_value tags,
+// verifying Preload, Query, Append, Replace and Clear touch only the
+// targeted association.
+func TestNamedPolymorphic(t *testing.T) {
+	hamster := Hamster{Name: "Mr. Hammond", PreferredToy: Toy{Name: "bike"}, OtherToy: Toy{Name: "treadmill"}}
+	DB.Save(&hamster)
+
+	hamster2 := Hamster{}
+	DB.Preload("PreferredToy").Preload("OtherToy").Find(&hamster2, hamster.Id)
+	if hamster2.PreferredToy.Id != hamster.PreferredToy.Id || hamster2.PreferredToy.Name != hamster.PreferredToy.Name {
+		t.Errorf("Hamster's preferred toy couldn't be preloaded")
+	}
+	if hamster2.OtherToy.Id != hamster.OtherToy.Id || hamster2.OtherToy.Name != hamster.OtherToy.Name {
+		t.Errorf("Hamster's other toy couldn't be preloaded")
+	}
+
+	// clear to omit Toy.Id in count
+	hamster2.PreferredToy = Toy{}
+	hamster2.OtherToy = Toy{}
+
+	if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 {
+		t.Errorf("Hamster's preferred toy count should be 1")
+	}
+
+	if DB.Model(&hamster2).Association("OtherToy").Count() != 1 {
+		t.Errorf("Hamster's other toy count should be 1")
+	}
+
+	// Query: each named association resolves to its own single toy.
+	var hamsterToys []Toy
+	if DB.Model(&hamster).Related(&hamsterToys, "PreferredToy").RecordNotFound() {
+		t.Errorf("Did not find any has one polymorphic association")
+	} else if len(hamsterToys) != 1 {
+		t.Errorf("Should have found only one polymorphic has one association")
+	} else if hamsterToys[0].Name != hamster.PreferredToy.Name {
+		t.Errorf("Should have found the proper has one polymorphic association")
+	}
+
+	if DB.Model(&hamster).Related(&hamsterToys, "OtherToy").RecordNotFound() {
+		t.Errorf("Did not find any has one polymorphic association")
+	} else if len(hamsterToys) != 1 {
+		t.Errorf("Should have found only one polymorphic has one association")
+	} else if hamsterToys[0].Name != hamster.OtherToy.Name {
+		t.Errorf("Should have found the proper has one polymorphic association")
+	}
+
+	hamsterToy := Toy{}
+	DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy)
+	if hamsterToy.Name != hamster.PreferredToy.Name {
+		t.Errorf("Should find has one polymorphic association")
+	}
+	hamsterToy = Toy{}
+	DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy)
+	if hamsterToy.Name != hamster.OtherToy.Name {
+		t.Errorf("Should find has one polymorphic association")
+	}
+
+	// Append: on a has-one association, Append replaces the current toy.
+	DB.Model(&hamster).Association("PreferredToy").Append(&Toy{
+		Name: "bike 2",
+	})
+	DB.Model(&hamster).Association("OtherToy").Append(&Toy{
+		Name: "treadmill 2",
+	})
+
+	hamsterToy = Toy{}
+	DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy)
+	if hamsterToy.Name != "bike 2" {
+		t.Errorf("Should update has one polymorphic association with Append")
+	}
+
+	hamsterToy = Toy{}
+	DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy)
+	if hamsterToy.Name != "treadmill 2" {
+		t.Errorf("Should update has one polymorphic association with Append")
+	}
+
+	if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 {
+		t.Errorf("Hamster's toys count should be 1 after Append")
+	}
+
+	if DB.Model(&hamster2).Association("OtherToy").Count() != 1 {
+		t.Errorf("Hamster's toys count should be 1 after Append")
+	}
+
+	// Replace
+	DB.Model(&hamster).Association("PreferredToy").Replace(&Toy{
+		Name: "bike 3",
+	})
+	DB.Model(&hamster).Association("OtherToy").Replace(&Toy{
+		Name: "treadmill 3",
+	})
+
+	hamsterToy = Toy{}
+	DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy)
+	if hamsterToy.Name != "bike 3" {
+		t.Errorf("Should update has one polymorphic association with Replace")
+	}
+
+	hamsterToy = Toy{}
+	DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy)
+	if hamsterToy.Name != "treadmill 3" {
+		t.Errorf("Should update has one polymorphic association with Replace")
+	}
+
+	if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 {
+		t.Errorf("hamster's toys count should be 1 after Replace")
+	}
+
+	if DB.Model(&hamster2).Association("OtherToy").Count() != 1 {
+		t.Errorf("hamster's toys count should be 1 after Replace")
+	}
+
+	// Clear: clearing one named association must leave the other intact.
+	DB.Model(&hamster).Association("PreferredToy").Append(&Toy{
+		Name: "bike 2",
+	})
+	DB.Model(&hamster).Association("OtherToy").Append(&Toy{
+		Name: "treadmill 2",
+	})
+
+	if DB.Model(&hamster).Association("PreferredToy").Count() != 1 {
+		t.Errorf("Hamster's toys should be added with Append")
+	}
+	if DB.Model(&hamster).Association("OtherToy").Count() != 1 {
+		t.Errorf("Hamster's toys should be added with Append")
+	}
+
+	DB.Model(&hamster).Association("PreferredToy").Clear()
+
+	if DB.Model(&hamster2).Association("PreferredToy").Count() != 0 {
+		t.Errorf("Hamster's preferred toy should be cleared with Clear")
+	}
+	if DB.Model(&hamster2).Association("OtherToy").Count() != 1 {
+		t.Errorf("Hamster's other toy should be still available")
+	}
+
+	DB.Model(&hamster).Association("OtherToy").Clear()
+	if DB.Model(&hamster).Association("OtherToy").Count() != 0 {
+		t.Errorf("Hamster's other toy should be cleared with Clear")
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/preload_test.go b/vendor/github.com/jinzhu/gorm/preload_test.go
new file mode 100755 (executable)
index 0000000..1db625c
--- /dev/null
@@ -0,0 +1,1700 @@
+package gorm_test
+
+import (
+       "database/sql"
+       "encoding/json"
+       "os"
+       "reflect"
+       "testing"
+
+       "github.com/jinzhu/gorm"
+)
+
+func getPreloadUser(name string) *User { // builds a User fixture with role "Preload"; the role pairs with the Where("role = ?", "Preload") filters below
+       return getPreparedUser(name, "Preload")
+}
+
+func checkUserHasPreloadData(user User, t *testing.T) { // asserts user's associations match a fresh fixture built for the same name
+       u := getPreloadUser(user.Name)
+       if user.BillingAddress.Address1 != u.BillingAddress.Address1 {
+               t.Error("Failed to preload user's BillingAddress")
+       }
+
+       if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 {
+               t.Error("Failed to preload user's ShippingAddress")
+       }
+
+       if user.CreditCard.Number != u.CreditCard.Number {
+               t.Error("Failed to preload user's CreditCard")
+       }
+
+       if user.Company.Name != u.Company.Name {
+               t.Error("Failed to preload user's Company")
+       }
+
+       if len(user.Emails) != len(u.Emails) {
+               t.Error("Failed to preload user's Emails")
+       } else {
+               var found int // counts fixture emails present in the preloaded set; order-independent match
+               for _, e1 := range u.Emails {
+                       for _, e2 := range user.Emails {
+                               if e1.Email == e2.Email {
+                                       found++
+                                       break
+                               }
+                       }
+               }
+               if found != len(u.Emails) {
+                       t.Error("Failed to preload user's email details")
+               }
+       }
+}
+
+func TestPreload(t *testing.T) { // exercises Preload on a single record, a slice, a slice of pointers, and a conditional preload
+       user1 := getPreloadUser("user1")
+       DB.Save(user1)
+
+       preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress").
+               Preload("CreditCard").Preload("Emails").Preload("Company")
+       var user User
+       preloadDB.Find(&user)
+       checkUserHasPreloadData(user, t)
+
+       user2 := getPreloadUser("user2")
+       DB.Save(user2)
+
+       user3 := getPreloadUser("user3")
+       DB.Save(user3)
+
+       var users []User
+       preloadDB.Find(&users)
+
+       for _, user := range users {
+               checkUserHasPreloadData(user, t)
+       }
+
+       var users2 []*User
+       preloadDB.Find(&users2)
+
+       for _, user := range users2 {
+               checkUserHasPreloadData(*user, t)
+       }
+
+       var users3 []*User
+       preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3) // conditional preload: only user3's first email should be loaded
+
+       for _, user := range users3 {
+               if user.Name == user3.Name {
+                       if len(user.Emails) != 1 {
+                               t.Errorf("should only preload one emails for user3 when with condition")
+                       }
+               } else if len(user.Emails) != 0 {
+                       t.Errorf("should not preload any emails for other users when with condition")
+               } else if user.Emails == nil { // zero matches must yield an empty (non-nil) slice
+                       t.Errorf("should return an empty slice to indicate zero results")
+               }
+       }
+}
+
+func TestAutoPreload(t *testing.T) { // gorm:auto_preload=true should load all associations without explicit Preload calls
+       user1 := getPreloadUser("auto_user1")
+       DB.Save(user1)
+
+       preloadDB := DB.Set("gorm:auto_preload", true).Where("role = ?", "Preload")
+       var user User
+       preloadDB.Find(&user)
+       checkUserHasPreloadData(user, t)
+
+       user2 := getPreloadUser("auto_user2")
+       DB.Save(user2)
+
+       var users []User
+       preloadDB.Find(&users)
+
+       for _, user := range users {
+               checkUserHasPreloadData(user, t)
+       }
+
+       var users2 []*User
+       preloadDB.Find(&users2)
+
+       for _, user := range users2 {
+               checkUserHasPreloadData(*user, t)
+       }
+}
+
+func TestAutoPreloadFalseDoesntPreload(t *testing.T) { // gorm:auto_preload=false must leave associations unloaded
+       user1 := getPreloadUser("auto_user1")
+       DB.Save(user1)
+
+       preloadDB := DB.Set("gorm:auto_preload", false).Where("role = ?", "Preload")
+       var user User
+       preloadDB.Find(&user)
+
+       if user.BillingAddress.Address1 != "" { // association left at zero value proves it was not fetched
+               t.Error("AutoPreload was set to false, but still fetched data")
+       }
+
+       user2 := getPreloadUser("auto_user2")
+       DB.Save(user2)
+
+       var users []User
+       preloadDB.Find(&users)
+
+       for _, user := range users {
+               if user.BillingAddress.Address1 != "" {
+                       t.Error("AutoPreload was set to false, but still fetched data")
+               }
+       }
+}
+
+func TestNestedPreload1(t *testing.T) { // has-one chain Level3 -> Level2 -> Level1, preloaded via two explicit Preload calls
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1   Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID     uint
+                       Name   string
+                       Level2 Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
+       if err := DB.Create(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level3
+       if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+
+       if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got, "name = ?", "not_found").Error; err != gorm.ErrRecordNotFound { // a miss must surface ErrRecordNotFound, not nil
+               t.Error(err)
+       }
+}
+
+func TestNestedPreload2(t *testing.T) { // slices at both levels: Level3 -> []Level2 -> []*Level1, preloaded with a single dotted path
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1s  []*Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID      uint
+                       Name    string
+                       Level2s []Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level3{
+               Level2s: []Level2{
+                       {
+                               Level1s: []*Level1{
+                                       {Value: "value1"},
+                                       {Value: "value2"},
+                               },
+                       },
+                       {
+                               Level1s: []*Level1{
+                                       {Value: "value3"},
+                               },
+                       },
+               },
+       }
+       if err := DB.Create(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level3
+       if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+func TestNestedPreload3(t *testing.T) { // Level3 -> []Level2, each Level2 holding a single has-one Level1
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1   Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       Name    string
+                       ID      uint
+                       Level2s []Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level3{
+               Level2s: []Level2{
+                       {Level1: Level1{Value: "value1"}},
+                       {Level1: Level1{Value: "value2"}},
+               },
+       }
+       if err := DB.Create(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level3
+       if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+func TestNestedPreload4(t *testing.T) { // single has-one Level2 holding a slice of Level1s
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1s  []Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID     uint
+                       Name   string
+                       Level2 Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level3{
+               Level2: Level2{
+                       Level1s: []Level1{
+                               {Value: "value1"},
+                               {Value: "value2"},
+                       },
+               },
+       }
+       if err := DB.Create(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level3
+       if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+// TestNestedPreload5 repeats TestNestedPreload1's has-one chain but fetches into a slice ([]Level3).
+func TestNestedPreload5(t *testing.T) {
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1   Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID     uint
+                       Name   string
+                       Level2 Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := make([]Level3, 2)
+       want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
+       if err := DB.Create(&want[0]).Error; err != nil {
+               t.Error(err)
+       }
+       want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}}
+       if err := DB.Create(&want[1]).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got []Level3
+       if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+func TestNestedPreload6(t *testing.T) { // same shape as TestNestedPreload2 ([]Level2 -> []Level1) but fetched into []Level3
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1s  []Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID      uint
+                       Name    string
+                       Level2s []Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := make([]Level3, 2)
+       want[0] = Level3{
+               Level2s: []Level2{
+                       {
+                               Level1s: []Level1{
+                                       {Value: "value1"},
+                                       {Value: "value2"},
+                               },
+                       },
+                       {
+                               Level1s: []Level1{
+                                       {Value: "value3"},
+                               },
+                       },
+               },
+       }
+       if err := DB.Create(&want[0]).Error; err != nil {
+               t.Error(err)
+       }
+
+       want[1] = Level3{
+               Level2s: []Level2{
+                       {
+                               Level1s: []Level1{
+                                       {Value: "value3"},
+                                       {Value: "value4"},
+                               },
+                       },
+                       {
+                               Level1s: []Level1{
+                                       {Value: "value5"},
+                               },
+                       },
+               },
+       }
+       if err := DB.Create(&want[1]).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got []Level3
+       if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+func TestNestedPreload7(t *testing.T) { // []Level3, each with []Level2 holding a single has-one Level1
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1   Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID      uint
+                       Name    string
+                       Level2s []Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := make([]Level3, 2)
+       want[0] = Level3{
+               Level2s: []Level2{
+                       {Level1: Level1{Value: "value1"}},
+                       {Level1: Level1{Value: "value2"}},
+               },
+       }
+       if err := DB.Create(&want[0]).Error; err != nil {
+               t.Error(err)
+       }
+
+       want[1] = Level3{
+               Level2s: []Level2{
+                       {Level1: Level1{Value: "value3"}},
+                       {Level1: Level1{Value: "value4"}},
+               },
+       }
+       if err := DB.Create(&want[1]).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got []Level3
+       if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+func TestNestedPreload8(t *testing.T) { // []Level3, each with a has-one Level2 holding a slice of Level1s
+       type (
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1s  []Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID     uint
+                       Name   string
+                       Level2 Level2
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := make([]Level3, 2)
+       want[0] = Level3{
+               Level2: Level2{
+                       Level1s: []Level1{
+                               {Value: "value1"},
+                               {Value: "value2"},
+                       },
+               },
+       }
+       if err := DB.Create(&want[0]).Error; err != nil {
+               t.Error(err)
+       }
+       want[1] = Level3{
+               Level2: Level2{
+                       Level1s: []Level1{
+                               {Value: "value3"},
+                               {Value: "value4"},
+                       },
+               },
+       }
+       if err := DB.Create(&want[1]).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got []Level3
+       if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+func TestNestedPreload9(t *testing.T) { // two parallel branches (Level2 and Level2_1) plus a third nesting level (Level0) under one of them
+       type (
+               Level0 struct {
+                       ID       uint
+                       Value    string
+                       Level1ID uint
+               }
+               Level1 struct {
+                       ID         uint
+                       Value      string
+                       Level2ID   uint
+                       Level2_1ID uint
+                       Level0s    []Level0
+               }
+               Level2 struct {
+                       ID       uint
+                       Level1s  []Level1
+                       Level3ID uint
+               }
+               Level2_1 struct {
+                       ID       uint
+                       Level1s  []Level1
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID       uint
+                       Name     string
+                       Level2   Level2
+                       Level2_1 Level2_1
+               }
+       )
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level2_1{})
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists(&Level0{})
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := make([]Level3, 2)
+       want[0] = Level3{
+               Level2: Level2{
+                       Level1s: []Level1{
+                               {Value: "value1"},
+                               {Value: "value2"},
+                       },
+               },
+               Level2_1: Level2_1{
+                       Level1s: []Level1{
+                               {
+                                       Value:   "value1-1",
+                                       Level0s: []Level0{{Value: "Level0-1"}},
+                               },
+                               {
+                                       Value:   "value2-2",
+                                       Level0s: []Level0{{Value: "Level0-2"}},
+                               },
+                       },
+               },
+       }
+       if err := DB.Create(&want[0]).Error; err != nil {
+               t.Error(err)
+       }
+       want[1] = Level3{
+               Level2: Level2{
+                       Level1s: []Level1{
+                               {Value: "value3"},
+                               {Value: "value4"},
+                       },
+               },
+               Level2_1: Level2_1{
+                       Level1s: []Level1{
+                               {
+                                       Value:   "value3-3",
+                                       Level0s: []Level0{}, // empty (non-nil) slice expected back after preloading zero rows
+                               },
+                               {
+                                       Value:   "value4-4",
+                                       Level0s: []Level0{},
+                               },
+                       },
+               },
+       }
+       if err := DB.Create(&want[1]).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got []Level3
+       if err := DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+type LevelA1 struct { // leaf referenced by LevelA3 through a nullable FK
+       ID    uint
+       Value string
+}
+
+type LevelA2 struct { // owner of a has-many LevelA3s
+       ID       uint
+       Value    string
+       LevelA3s []*LevelA3
+}
+
+type LevelA3 struct { // joins LevelA1 and LevelA2; both FKs are sql.NullInt64 so either side may be absent
+       ID        uint
+       Value     string
+       LevelA1ID sql.NullInt64
+       LevelA1   *LevelA1
+       LevelA2ID sql.NullInt64
+       LevelA2   *LevelA2
+}
+
+func TestNestedPreload10(t *testing.T) { // nested preload through pointer slices with nullable FKs (LevelA2 -> []*LevelA3 -> *LevelA1)
+       DB.DropTableIfExists(&LevelA3{})
+       DB.DropTableIfExists(&LevelA2{})
+       DB.DropTableIfExists(&LevelA1{})
+
+       if err := DB.AutoMigrate(&LevelA1{}, &LevelA2{}, &LevelA3{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       levelA1 := &LevelA1{Value: "foo"}
+       if err := DB.Save(levelA1).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := []*LevelA2{
+               {
+                       Value: "bar",
+                       LevelA3s: []*LevelA3{
+                               {
+                                       Value:   "qux",
+                                       LevelA1: levelA1,
+                               },
+                       },
+               },
+               {
+                       Value:    "bar 2",
+                       LevelA3s: []*LevelA3{}, // record with no children: preload should produce an empty slice
+               },
+       }
+       for _, levelA2 := range want {
+               if err := DB.Save(levelA2).Error; err != nil {
+                       t.Error(err)
+               }
+       }
+
+       var got []*LevelA2
+       if err := DB.Preload("LevelA3s.LevelA1").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+type LevelB1 struct { // owner of a has-many LevelB3s
+       ID       uint
+       Value    string
+       LevelB3s []*LevelB3
+}
+
+type LevelB2 struct { // attached to LevelB3 through the many2many join table below
+       ID    uint
+       Value string
+}
+
+type LevelB3 struct { // belongs to LevelB1 (nullable FK) and holds a many2many to LevelB2
+       ID        uint
+       Value     string
+       LevelB1ID sql.NullInt64
+       LevelB1   *LevelB1
+       LevelB2s  []*LevelB2 `gorm:"many2many:levelb1_levelb3_levelb2s"`
+}
+
+func TestNestedPreload11(t *testing.T) { // nested preload ending in a many2many with zero joined rows
+       DB.DropTableIfExists(&LevelB2{})
+       DB.DropTableIfExists(&LevelB3{})
+       DB.DropTableIfExists(&LevelB1{})
+       if err := DB.AutoMigrate(&LevelB1{}, &LevelB2{}, &LevelB3{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       levelB1 := &LevelB1{Value: "foo"}
+       if err := DB.Create(levelB1).Error; err != nil {
+               t.Error(err)
+       }
+
+       levelB3 := &LevelB3{
+               Value:     "bar",
+               LevelB1ID: sql.NullInt64{Valid: true, Int64: int64(levelB1.ID)}, // link to parent via the nullable FK
+       }
+       if err := DB.Create(levelB3).Error; err != nil {
+               t.Error(err)
+       }
+       levelB1.LevelB3s = []*LevelB3{levelB3}
+
+       want := []*LevelB1{levelB1}
+       var got []*LevelB1
+       if err := DB.Preload("LevelB3s.LevelB2s").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+type LevelC1 struct { // has-one child of LevelC2
+       ID        uint
+       Value     string
+       LevelC2ID uint
+}
+
+type LevelC2 struct { // middle level, shared by multiple LevelC3 rows in the test below
+       ID      uint
+       Value   string
+       LevelC1 LevelC1
+}
+
+type LevelC3 struct { // belongs to LevelC2
+       ID        uint
+       Value     string
+       LevelC2ID uint
+       LevelC2   LevelC2
+}
+
+func TestNestedPreload12(t *testing.T) { // two LevelC3 rows sharing one LevelC2 parent; both must get the full nested data
+       DB.DropTableIfExists(&LevelC2{})
+       DB.DropTableIfExists(&LevelC3{})
+       DB.DropTableIfExists(&LevelC1{})
+       if err := DB.AutoMigrate(&LevelC1{}, &LevelC2{}, &LevelC3{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       level2 := LevelC2{
+               Value: "c2",
+               LevelC1: LevelC1{
+                       Value: "c1",
+               },
+       }
+       DB.Create(&level2)
+
+       want := []LevelC3{
+               {
+                       Value:   "c3-1",
+                       LevelC2: level2,
+               }, {
+                       Value:   "c3-2",
+                       LevelC2: level2,
+               },
+       }
+
+       for i := range want {
+               if err := DB.Create(&want[i]).Error; err != nil {
+                       t.Error(err)
+               }
+       }
+
+       var got []LevelC3
+       if err := DB.Preload("LevelC2").Preload("LevelC2.LevelC1").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+func TestManyToManyPreloadWithMultiPrimaryKeys(t *testing.T) { // many2many preload where both sides use composite primary keys
+       if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" || dialect == "mssql" { // skipped on these dialects per this guard
+               return
+       }
+
+       type (
+               Level1 struct {
+                       ID           uint   `gorm:"primary_key;"`
+                       LanguageCode string `gorm:"primary_key"`
+                       Value        string
+               }
+               Level2 struct {
+                       ID           uint   `gorm:"primary_key;"`
+                       LanguageCode string `gorm:"primary_key"`
+                       Value        string
+                       Level1s      []Level1 `gorm:"many2many:levels;"`
+               }
+       )
+
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists("levels")
+
+       if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level2{Value: "Bob", LanguageCode: "ru", Level1s: []Level1{
+               {Value: "ru", LanguageCode: "ru"},
+               {Value: "en", LanguageCode: "en"},
+       }}
+       if err := DB.Save(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       want2 := Level2{Value: "Tom", LanguageCode: "zh", Level1s: []Level1{
+               {Value: "zh", LanguageCode: "zh"},
+               {Value: "de", LanguageCode: "de"},
+       }}
+       if err := DB.Save(&want2).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level2
+       if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+
+       var got2 Level2
+       if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got2, want2) {
+               t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
+       }
+
+       var got3 []Level2
+       if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got3, []Level2{got, got2}) {
+               t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
+       }
+
+       var got4 []Level2
+       if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
+               t.Error(err)
+       }
+
+       var ruLevel1 Level1
+       var zhLevel1 Level1
+       DB.First(&ruLevel1, "value = ?", "ru")
+       DB.First(&zhLevel1, "value = ?", "zh")
+
+       got.Level1s = []Level1{ruLevel1} // rebuild the expectation for the conditional preload above
+       got2.Level1s = []Level1{zhLevel1}
+       if !reflect.DeepEqual(got4, []Level2{got, got2}) {
+               t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
+       }
+
+       if err := DB.Preload("Level1s").Find(&got4, "value IN (?)", []string{"non-existing"}).Error; err != nil {
+               t.Error(err)
+       }
+}
+
+func TestManyToManyPreloadForNestedPointer(t *testing.T) { // many2many reached through a pointer association (Level3 -> *Level2 -> []*Level1)
+       type (
+               Level1 struct {
+                       ID    uint
+                       Value string
+               }
+               Level2 struct {
+                       ID      uint
+                       Value   string
+                       Level1s []*Level1 `gorm:"many2many:levels;"`
+               }
+               Level3 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID sql.NullInt64
+                       Level2   *Level2
+               }
+       )
+
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists("levels")
+
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level3{
+               Value: "Bob",
+               Level2: &Level2{
+                       Value: "Foo",
+                       Level1s: []*Level1{
+                               {Value: "ru"},
+                               {Value: "en"},
+                       },
+               },
+       }
+       if err := DB.Save(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       want2 := Level3{
+               Value: "Tom",
+               Level2: &Level2{
+                       Value: "Bar",
+                       Level1s: []*Level1{
+                               {Value: "zh"},
+                               {Value: "de"},
+                       },
+               },
+       }
+       if err := DB.Save(&want2).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level3
+       if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+
+       var got2 Level3
+       if err := DB.Preload("Level2.Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got2, want2) {
+               t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
+       }
+
+       var got3 []Level3
+       if err := DB.Preload("Level2.Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got3, []Level3{got, got2}) {
+               t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level3{got, got2}))
+       }
+
+       var got4 []Level3
+       if err := DB.Preload("Level2.Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got5 Level3
+       DB.Preload("Level2.Level1s").Find(&got5, "value = ?", "bogus") // miss on a pointer association; error deliberately ignored here
+
+       var ruLevel1 Level1
+       var zhLevel1 Level1
+       DB.First(&ruLevel1, "value = ?", "ru")
+       DB.First(&zhLevel1, "value = ?", "zh")
+
+       got.Level2.Level1s = []*Level1{&ruLevel1} // rebuild the expectation for the conditional preload above
+       got2.Level2.Level1s = []*Level1{&zhLevel1}
+       if !reflect.DeepEqual(got4, []Level3{got, got2}) {
+               t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level3{got, got2}))
+       }
+}
+
+// TestNestedManyToManyPreload verifies that two stacked many2many
+// associations (Level3 -> []Level2 -> []*Level1) are populated by chained
+// Preload calls, and that a miss yields gorm.ErrRecordNotFound.
+func TestNestedManyToManyPreload(t *testing.T) {
+       type (
+               Level1 struct {
+                       ID    uint
+                       Value string
+               }
+               Level2 struct {
+                       ID      uint
+                       Value   string
+                       Level1s []*Level1 `gorm:"many2many:level1_level2;"`
+               }
+               Level3 struct {
+                       ID      uint
+                       Value   string
+                       Level2s []Level2 `gorm:"many2many:level2_level3;"`
+               }
+       )
+
+       // Start from a clean schema, including both join tables.
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists("level1_level2")
+       DB.DropTableIfExists("level2_level3")
+
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       // Saving the root is expected to cascade through both join tables.
+       want := Level3{
+               Value: "Level3",
+               Level2s: []Level2{
+                       {
+                               Value: "Bob",
+                               Level1s: []*Level1{
+                                       {Value: "ru"},
+                                       {Value: "en"},
+                               },
+                       }, {
+                               Value: "Tom",
+                               Level1s: []*Level1{
+                                       {Value: "zh"},
+                                       {Value: "de"},
+                               },
+                       },
+               },
+       }
+
+       if err := DB.Save(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level3
+       if err := DB.Preload("Level2s").Preload("Level2s.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+
+       // A query that matches nothing must surface ErrRecordNotFound.
+       if err := DB.Preload("Level2s.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
+               t.Error(err)
+       }
+}
+
+// TestNestedManyToManyPreload2 verifies preloading a many2many association
+// that hangs off a belongs-to pointer (Level3 -> *Level2 -> []*Level1).
+func TestNestedManyToManyPreload2(t *testing.T) {
+       type (
+               Level1 struct {
+                       ID    uint
+                       Value string
+               }
+               Level2 struct {
+                       ID      uint
+                       Value   string
+                       Level1s []*Level1 `gorm:"many2many:level1_level2;"`
+               }
+               Level3 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID sql.NullInt64
+                       Level2   *Level2
+               }
+       )
+
+       // Reset the schema, including the many2many join table.
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists("level1_level2")
+
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level3{
+               Value: "Level3",
+               Level2: &Level2{
+                       Value: "Bob",
+                       Level1s: []*Level1{
+                               {Value: "ru"},
+                               {Value: "en"},
+                       },
+               },
+       }
+
+       if err := DB.Save(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level3
+       if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+
+       // A miss must surface ErrRecordNotFound rather than a silent zero value.
+       if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
+               t.Error(err)
+       }
+}
+
+// TestNestedManyToManyPreload3 verifies that a preload condition function
+// (here an ORDER BY on the joined level1 table) is applied when several
+// Level3 rows share the same Level2, using pointer-slice associations.
+func TestNestedManyToManyPreload3(t *testing.T) {
+       type (
+               Level1 struct {
+                       ID    uint
+                       Value string
+               }
+               Level2 struct {
+                       ID      uint
+                       Value   string
+                       Level1s []*Level1 `gorm:"many2many:level1_level2;"`
+               }
+               Level3 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID sql.NullInt64
+                       Level2   *Level2
+               }
+       )
+
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists("level1_level2")
+
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       level1Zh := &Level1{Value: "zh"}
+       level1Ru := &Level1{Value: "ru"}
+       level1En := &Level1{Value: "en"}
+
+       // level21 is shared by two Level3 rows; level1Zh is shared by both Level2s.
+       level21 := &Level2{
+               Value:   "Level2-1",
+               Level1s: []*Level1{level1Zh, level1Ru},
+       }
+
+       level22 := &Level2{
+               Value:   "Level2-2",
+               Level1s: []*Level1{level1Zh, level1En},
+       }
+
+       wants := []*Level3{
+               {
+                       Value:  "Level3-1",
+                       Level2: level21,
+               },
+               {
+                       Value:  "Level3-2",
+                       Level2: level22,
+               },
+               {
+                       Value:  "Level3-3",
+                       Level2: level21,
+               },
+       }
+
+       for _, want := range wants {
+               // NOTE(review): want is already a *Level3, so &want is a **Level3;
+               // gorm appears to tolerate the double pointer here — confirm.
+               if err := DB.Save(&want).Error; err != nil {
+                       t.Error(err)
+               }
+       }
+
+       var gots []*Level3
+       if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
+               return db.Order("level1.id ASC")
+       }).Find(&gots).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(gots, wants) {
+               t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
+       }
+}
+
+// TestNestedManyToManyPreload3ForStruct mirrors TestNestedManyToManyPreload3
+// but uses value (struct) associations instead of pointer slices, checking
+// the same ordered-preload behavior.
+func TestNestedManyToManyPreload3ForStruct(t *testing.T) {
+       type (
+               Level1 struct {
+                       ID    uint
+                       Value string
+               }
+               Level2 struct {
+                       ID      uint
+                       Value   string
+                       Level1s []Level1 `gorm:"many2many:level1_level2;"`
+               }
+               Level3 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID sql.NullInt64
+                       Level2   Level2
+               }
+       )
+
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists("level1_level2")
+
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       level1Zh := Level1{Value: "zh"}
+       level1Ru := Level1{Value: "ru"}
+       level1En := Level1{Value: "en"}
+
+       // level21 is referenced by two Level3 rows below.
+       level21 := Level2{
+               Value:   "Level2-1",
+               Level1s: []Level1{level1Zh, level1Ru},
+       }
+
+       level22 := Level2{
+               Value:   "Level2-2",
+               Level1s: []Level1{level1Zh, level1En},
+       }
+
+       wants := []*Level3{
+               {
+                       Value:  "Level3-1",
+                       Level2: level21,
+               },
+               {
+                       Value:  "Level3-2",
+                       Level2: level22,
+               },
+               {
+                       Value:  "Level3-3",
+                       Level2: level21,
+               },
+       }
+
+       for _, want := range wants {
+               // NOTE(review): &want is a **Level3 here as well — confirm gorm
+               // dereferences the extra indirection.
+               if err := DB.Save(&want).Error; err != nil {
+                       t.Error(err)
+               }
+       }
+
+       var gots []*Level3
+       if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
+               return db.Order("level1.id ASC")
+       }).Find(&gots).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(gots, wants) {
+               t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
+       }
+}
+
+// TestNestedManyToManyPreload4 builds a four-level chain mixing has-many and
+// many2many associations and checks only that the triple Preload query runs
+// without error (no DeepEqual assertion on the loaded data).
+func TestNestedManyToManyPreload4(t *testing.T) {
+       type (
+               Level4 struct {
+                       ID       uint
+                       Value    string
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID      uint
+                       Value   string
+                       Level4s []*Level4
+               }
+               Level2 struct {
+                       ID      uint
+                       Value   string
+                       Level3s []*Level3 `gorm:"many2many:level2_level3;"`
+               }
+               Level1 struct {
+                       ID      uint
+                       Value   string
+                       Level2s []*Level2 `gorm:"many2many:level1_level2;"`
+               }
+       )
+
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level4{})
+       DB.DropTableIfExists("level1_level2")
+       DB.DropTableIfExists("level2_level3")
+
+       dummy := Level1{
+               Value: "Level1",
+               Level2s: []*Level2{{
+                       Value: "Level2",
+                       Level3s: []*Level3{{
+                               Value: "Level3",
+                               Level4s: []*Level4{{
+                                       Value: "Level4",
+                               }},
+                       }},
+               }},
+       }
+
+       if err := DB.AutoMigrate(&Level4{}, &Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       if err := DB.Save(&dummy).Error; err != nil {
+               t.Error(err)
+       }
+
+       // Only error-free execution of the nested preload chain is asserted.
+       var level1 Level1
+       if err := DB.Preload("Level2s").Preload("Level2s.Level3s").Preload("Level2s.Level3s.Level4s").First(&level1).Error; err != nil {
+               t.Error(err)
+       }
+}
+
+// TestManyToManyPreloadForPointer checks a single-level many2many preload on
+// pointer-slice associations: single-record loads, multi-record loads, and
+// conditional preloads that filter the associated rows.
+func TestManyToManyPreloadForPointer(t *testing.T) {
+       type (
+               Level1 struct {
+                       ID    uint
+                       Value string
+               }
+               Level2 struct {
+                       ID      uint
+                       Value   string
+                       Level1s []*Level1 `gorm:"many2many:levels;"`
+               }
+       )
+
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+       DB.DropTableIfExists("levels")
+
+       if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level2{Value: "Bob", Level1s: []*Level1{
+               {Value: "ru"},
+               {Value: "en"},
+       }}
+       if err := DB.Save(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       want2 := Level2{Value: "Tom", Level1s: []*Level1{
+               {Value: "zh"},
+               {Value: "de"},
+       }}
+       if err := DB.Save(&want2).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got Level2
+       if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+
+       var got2 Level2
+       if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got2, want2) {
+               t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
+       }
+
+       var got3 []Level2
+       if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got3, []Level2{got, got2}) {
+               t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
+       }
+
+       // Conditional preload: only the zh/ru Level1 rows should be attached.
+       var got4 []Level2
+       if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
+               t.Error(err)
+       }
+
+       // Querying a bogus value exercises the miss path; result is discarded.
+       var got5 Level2
+       DB.Preload("Level1s").First(&got5, "value = ?", "bogus")
+
+       var ruLevel1 Level1
+       var zhLevel1 Level1
+       DB.First(&ruLevel1, "value = ?", "ru")
+       DB.First(&zhLevel1, "value = ?", "zh")
+
+       // Rebuild the expected filtered associations before comparing with got4.
+       got.Level1s = []*Level1{&ruLevel1}
+       got2.Level1s = []*Level1{&zhLevel1}
+       if !reflect.DeepEqual(got4, []Level2{got, got2}) {
+               t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
+       }
+}
+
+// TestNilPointerSlice verifies that preloading pointer associations works
+// when one record has a fully-populated chain and another has a nil pointer,
+// and that the nil association stays nil after the query.
+func TestNilPointerSlice(t *testing.T) {
+       type (
+               Level3 struct {
+                       ID    uint
+                       Value string
+               }
+               Level2 struct {
+                       ID       uint
+                       Value    string
+                       Level3ID uint
+                       Level3   *Level3
+               }
+               Level1 struct {
+                       ID       uint
+                       Value    string
+                       Level2ID uint
+                       Level2   *Level2
+               }
+       )
+
+       DB.DropTableIfExists(&Level3{})
+       DB.DropTableIfExists(&Level2{})
+       DB.DropTableIfExists(&Level1{})
+
+       if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := Level1{
+               Value: "Bob",
+               Level2: &Level2{
+                       Value: "en",
+                       Level3: &Level3{
+                               Value: "native",
+                       },
+               },
+       }
+       if err := DB.Save(&want).Error; err != nil {
+               t.Error(err)
+       }
+
+       // want2 deliberately leaves Level2 nil.
+       want2 := Level1{
+               Value:  "Tom",
+               Level2: nil,
+       }
+       if err := DB.Save(&want2).Error; err != nil {
+               t.Error(err)
+       }
+
+       var got []Level1
+       if err := DB.Preload("Level2").Preload("Level2.Level3").Find(&got).Error; err != nil {
+               t.Error(err)
+       }
+
+       if len(got) != 2 {
+               t.Errorf("got %v items, expected 2", len(got))
+       }
+
+       // Row order is not guaranteed, so each expected record may appear at
+       // either index.
+       if !reflect.DeepEqual(got[0], want) && !reflect.DeepEqual(got[1], want) {
+               t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want))
+       }
+
+       if !reflect.DeepEqual(got[0], want2) && !reflect.DeepEqual(got[1], want2) {
+               t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want2))
+       }
+}
+
+// TestNilPointerSlice2 saves a Level1 with every association left at its
+// zero value and checks a deep three-step preload neither errors nor
+// fabricates association data.
+func TestNilPointerSlice2(t *testing.T) {
+       type (
+               Level4 struct {
+                       ID uint
+               }
+               Level3 struct {
+                       ID       uint
+                       Level4ID sql.NullInt64 `sql:"index"`
+                       Level4   *Level4
+               }
+               Level2 struct {
+                       ID      uint
+                       Level3s []*Level3 `gorm:"many2many:level2_level3s"`
+               }
+               Level1 struct {
+                       ID       uint
+                       Level2ID sql.NullInt64 `sql:"index"`
+                       Level2   *Level2
+               }
+       )
+
+       DB.DropTableIfExists(new(Level4))
+       DB.DropTableIfExists(new(Level3))
+       DB.DropTableIfExists(new(Level2))
+       DB.DropTableIfExists(new(Level1))
+
+       if err := DB.AutoMigrate(new(Level4), new(Level3), new(Level2), new(Level1)).Error; err != nil {
+               t.Error(err)
+       }
+
+       // An entirely empty record: Level2 pointer stays nil.
+       want := new(Level1)
+       if err := DB.Save(want).Error; err != nil {
+               t.Error(err)
+       }
+
+       got := new(Level1)
+       err := DB.Preload("Level2.Level3s.Level4").Last(&got).Error
+       if err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+// TestPrefixedPreloadDuplication checks that when two Level1 rows reference
+// the same Level3 (through distinct Level2s), a nested preload populates the
+// shared branch for both rows without duplicating or dropping Level4 rows.
+func TestPrefixedPreloadDuplication(t *testing.T) {
+       type (
+               Level4 struct {
+                       ID       uint
+                       Name     string
+                       Level3ID uint
+               }
+               Level3 struct {
+                       ID      uint
+                       Name    string
+                       Level4s []*Level4
+               }
+               Level2 struct {
+                       ID       uint
+                       Name     string
+                       Level3ID sql.NullInt64 `sql:"index"`
+                       Level3   *Level3
+               }
+               Level1 struct {
+                       ID       uint
+                       Name     string
+                       Level2ID sql.NullInt64 `sql:"index"`
+                       Level2   *Level2
+               }
+       )
+
+       DB.DropTableIfExists(new(Level3))
+       DB.DropTableIfExists(new(Level4))
+       DB.DropTableIfExists(new(Level2))
+       DB.DropTableIfExists(new(Level1))
+
+       if err := DB.AutoMigrate(new(Level3), new(Level4), new(Level2), new(Level1)).Error; err != nil {
+               t.Error(err)
+       }
+
+       // One Level3 with two Level4 children, shared by both Level1 roots.
+       lvl := &Level3{}
+       if err := DB.Save(lvl).Error; err != nil {
+               t.Error(err)
+       }
+
+       sublvl1 := &Level4{Level3ID: lvl.ID}
+       if err := DB.Save(sublvl1).Error; err != nil {
+               t.Error(err)
+       }
+       sublvl2 := &Level4{Level3ID: lvl.ID}
+       if err := DB.Save(sublvl2).Error; err != nil {
+               t.Error(err)
+       }
+
+       lvl.Level4s = []*Level4{sublvl1, sublvl2}
+
+       want1 := Level1{
+               Level2: &Level2{
+                       Level3: lvl,
+               },
+       }
+       if err := DB.Save(&want1).Error; err != nil {
+               t.Error(err)
+       }
+
+       want2 := Level1{
+               Level2: &Level2{
+                       Level3: lvl,
+               },
+       }
+       if err := DB.Save(&want2).Error; err != nil {
+               t.Error(err)
+       }
+
+       want := []Level1{want1, want2}
+
+       var got []Level1
+       err := DB.Preload("Level2.Level3.Level4s").Find(&got).Error
+       if err != nil {
+               t.Error(err)
+       }
+
+       if !reflect.DeepEqual(got, want) {
+               t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
+       }
+}
+
+// TestPreloadManyToManyCallbacks counts gorm:query callback invocations for
+// a many2many preload: one query for the root row, one for the join table,
+// and one for the associated rows — three in total.
+func TestPreloadManyToManyCallbacks(t *testing.T) {
+       type (
+               Level2 struct {
+                       ID   uint
+                       Name string
+               }
+               Level1 struct {
+                       ID      uint
+                       Name    string
+                       Level2s []Level2 `gorm:"many2many:level1_level2s;AssociationForeignKey:ID;ForeignKey:ID"`
+               }
+       )
+
+       DB.DropTableIfExists("level1_level2s")
+       DB.DropTableIfExists(new(Level1))
+       DB.DropTableIfExists(new(Level2))
+
+       if err := DB.AutoMigrate(new(Level1), new(Level2)).Error; err != nil {
+               t.Error(err)
+       }
+
+       lvl := Level1{
+               Name: "l1",
+               Level2s: []Level2{
+                       Level2{Name: "l2-1"}, Level2{Name: "l2-2"},
+               },
+       }
+       DB.Save(&lvl)
+
+       called := 0
+
+       DB.Callback().Query().After("gorm:query").Register("TestPreloadManyToManyCallbacks", func(scope *gorm.Scope) {
+               called = called + 1
+       })
+       // Deregister the counter so it does not leak into every later test
+       // that shares the global DB handle.
+       defer DB.Callback().Query().Remove("TestPreloadManyToManyCallbacks")
+
+       DB.Preload("Level2s").First(&Level1{}, "id = ?", lvl.ID)
+
+       if called != 3 {
+               t.Errorf("Wanted callback to be called 3 times but got %d", called)
+       }
+}
+
+// toJSONString renders v as indented JSON for readable test-failure output;
+// marshal errors are deliberately ignored (best-effort diagnostics only).
+func toJSONString(v interface{}) []byte {
+       r, _ := json.MarshalIndent(v, "", "  ")
+       return r
+}
diff --git a/vendor/github.com/jinzhu/gorm/query_test.go b/vendor/github.com/jinzhu/gorm/query_test.go
new file mode 100755 (executable)
index 0000000..15bf8b3
--- /dev/null
@@ -0,0 +1,773 @@
+package gorm_test
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/jinzhu/gorm"
+
+       "testing"
+       "time"
+)
+
+// TestFirstAndLast verifies that First/Last order by primary key (matching
+// explicit ORDER BY queries), that First into a slice returns one row, and
+// that First still works when a Join is present.
+func TestFirstAndLast(t *testing.T) {
+       DB.Save(&User{Name: "user1", Emails: []Email{{Email: "user1@example.com"}}})
+       DB.Save(&User{Name: "user2", Emails: []Email{{Email: "user2@example.com"}}})
+
+       var user1, user2, user3, user4 User
+       DB.First(&user1)
+       DB.Order("id").Limit(1).Find(&user2)
+
+       // Last is also exercised through a pointer-to-pointer destination.
+       ptrOfUser3 := &user3
+       DB.Last(&ptrOfUser3)
+       DB.Order("id desc").Limit(1).Find(&user4)
+       if user1.Id != user2.Id || user3.Id != user4.Id {
+               t.Errorf("First and Last should be ordered by primary key")
+       }
+
+       var users []User
+       DB.First(&users)
+       if len(users) != 1 {
+               t.Errorf("Find first record as slice")
+       }
+
+       var user User
+       if DB.Joins("left join emails on emails.user_id = users.id").First(&user).Error != nil {
+               t.Errorf("Should not raise any error when order with Join table")
+       }
+
+       if user.Email != "" {
+               t.Errorf("User's Email should be blank as no one set it")
+       }
+}
+
+// TestFirstAndLastWithNoStdPrimaryKey repeats the First/Last ordering check
+// for a model (Animal) whose primary key is a non-standard column (counter).
+func TestFirstAndLastWithNoStdPrimaryKey(t *testing.T) {
+       DB.Save(&Animal{Name: "animal1"})
+       DB.Save(&Animal{Name: "animal2"})
+
+       var animal1, animal2, animal3, animal4 Animal
+       DB.First(&animal1)
+       DB.Order("counter").Limit(1).Find(&animal2)
+
+       DB.Last(&animal3)
+       DB.Order("counter desc").Limit(1).Find(&animal4)
+       if animal1.Counter != animal2.Counter || animal3.Counter != animal4.Counter {
+               t.Errorf("First and Last should work correctly")
+       }
+}
+
+// TestFirstAndLastWithRaw checks that First and Last still pick the correct
+// boundary rows when the query is supplied as raw SQL.
+func TestFirstAndLastWithRaw(t *testing.T) {
+       user1 := User{Name: "user", Emails: []Email{{Email: "user1@example.com"}}}
+       user2 := User{Name: "user", Emails: []Email{{Email: "user2@example.com"}}}
+       DB.Save(&user1)
+       DB.Save(&user2)
+
+       var user3, user4 User
+       DB.Raw("select * from users WHERE name = ?", "user").First(&user3)
+       if user3.Id != user1.Id {
+               t.Errorf("Find first record with raw")
+       }
+
+       DB.Raw("select * from users WHERE name = ?", "user").Last(&user4)
+       if user4.Id != user2.Id {
+               t.Errorf("Find last record with raw")
+       }
+}
+
+// TestUIntPrimaryKey fetches records by an unsigned primary key, both as an
+// inline First condition and via a Where(struct) + Scan.
+func TestUIntPrimaryKey(t *testing.T) {
+       var animal Animal
+       DB.First(&animal, uint64(1))
+       if animal.Counter != 1 {
+               t.Errorf("Fetch a record from with a non-int primary key should work, but failed")
+       }
+
+       DB.Model(Animal{}).Where(Animal{Counter: uint64(2)}).Scan(&animal)
+       if animal.Counter != 2 {
+               t.Errorf("Fetch a record from with a non-int primary key should work, but failed")
+       }
+}
+
+// TestCustomizedTypePrimaryKey uses a named type (type ID uint) as primary
+// key: passing it directly as an inline condition must error, while an
+// explicit "id = ?" query must succeed.
+func TestCustomizedTypePrimaryKey(t *testing.T) {
+       type ID uint
+       type CustomizedTypePrimaryKey struct {
+               ID   ID
+               Name string
+       }
+
+       DB.AutoMigrate(&CustomizedTypePrimaryKey{})
+
+       p1 := CustomizedTypePrimaryKey{Name: "p1"}
+       p2 := CustomizedTypePrimaryKey{Name: "p2"}
+       p3 := CustomizedTypePrimaryKey{Name: "p3"}
+       DB.Create(&p1)
+       DB.Create(&p2)
+       DB.Create(&p3)
+
+       var p CustomizedTypePrimaryKey
+
+       // The bare custom-typed value is not accepted as a primary-key lookup.
+       if err := DB.First(&p, p2.ID).Error; err == nil {
+               t.Errorf("Should return error for invalid query condition")
+       }
+
+       if err := DB.First(&p, "id = ?", p2.ID).Error; err != nil {
+               t.Errorf("No error should happen when querying with customized type for primary key, got err %v", err)
+       }
+
+       if p.Name != "p2" {
+               t.Errorf("Should find correct value when querying with customized type for primary key")
+       }
+}
+
+// TestStringPrimaryKeyForNumericValueStartingWithZero ensures that a string
+// primary key such as "00501" is not coerced to a number (which would strip
+// the leading zeros) when used as an inline First condition.
+func TestStringPrimaryKeyForNumericValueStartingWithZero(t *testing.T) {
+       type AddressByZipCode struct {
+               ZipCode string `gorm:"primary_key"`
+               Address string
+       }
+
+       DB.AutoMigrate(&AddressByZipCode{})
+       DB.Create(&AddressByZipCode{ZipCode: "00501", Address: "Holtsville"})
+
+       var address AddressByZipCode
+       DB.First(&address, "00501")
+       if address.ZipCode != "00501" {
+               t.Errorf("Fetch a record from with a string primary key for a numeric value starting with zero should work, but failed, zip code is %v", address.ZipCode)
+       }
+}
+
+// TestFindAsSliceOfPointers checks that Find into []*User returns the same
+// number of rows as Find into []User.
+func TestFindAsSliceOfPointers(t *testing.T) {
+       DB.Save(&User{Name: "user"})
+
+       var users []User
+       DB.Find(&users)
+
+       var userPointers []*User
+       DB.Find(&userPointers)
+
+       if len(users) == 0 || len(users) != len(userPointers) {
+               t.Errorf("Find slice of pointers")
+       }
+}
+
+// TestSearchWithPlainSQL exercises raw-SQL conditions: LIKE patterns, chained
+// and scoped Where clauses, IN with slices (including an empty slice, which
+// must not error), and RecordNotFound semantics for slice destinations.
+func TestSearchWithPlainSQL(t *testing.T) {
+       user1 := User{Name: "PlainSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "PlainSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "PlainSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+       // scopedb carries a reusable LIKE condition for the checks below.
+       scopedb := DB.Where("name LIKE ?", "%PlainSqlUser%")
+
+       if DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() {
+               t.Errorf("Search with plain SQL")
+       }
+
+       if DB.Where("name LIKE ?", "%"+user1.Name+"%").First(&User{}).RecordNotFound() {
+               t.Errorf("Search with plain SQL (LIKE)")
+       }
+
+       var users []User
+       DB.Find(&users, "name LIKE ? and age > ?", "%PlainSqlUser%", 1)
+       if len(users) != 2 {
+               t.Errorf("Should found 2 users that age > 1, but got %v", len(users))
+       }
+
+       DB.Where("name LIKE ?", "%PlainSqlUser%").Where("age >= ?", 1).Find(&users)
+       if len(users) != 3 {
+               t.Errorf("Should found 3 users that age >= 1, but got %v", len(users))
+       }
+
+       scopedb.Where("age <> ?", 20).Find(&users)
+       if len(users) != 2 {
+               t.Errorf("Should found 2 users age != 20, but got %v", len(users))
+       }
+
+       scopedb.Where("birthday > ?", parseTime("2000-1-1")).Find(&users)
+       if len(users) != 2 {
+               t.Errorf("Should found 2 users' birthday > 2000-1-1, but got %v", len(users))
+       }
+
+       scopedb.Where("birthday > ?", "2002-10-10").Find(&users)
+       if len(users) != 2 {
+               t.Errorf("Should found 2 users' birthday > 2002-10-10, but got %v", len(users))
+       }
+
+       scopedb.Where("birthday >= ?", "2010-1-1").Where("birthday < ?", "2020-1-1").Find(&users)
+       if len(users) != 1 {
+               t.Errorf("Should found 1 users' birthday < 2020-1-1 and >= 2010-1-1, but got %v", len(users))
+       }
+
+       DB.Where("name in (?)", []string{user1.Name, user2.Name}).Find(&users)
+       if len(users) != 2 {
+               t.Errorf("Should found 2 users, but got %v", len(users))
+       }
+
+       DB.Where("id in (?)", []int64{user1.Id, user2.Id, user3.Id}).Find(&users)
+       if len(users) != 3 {
+               t.Errorf("Should found 3 users, but got %v", len(users))
+       }
+
+       DB.Where("id in (?)", user1.Id).Find(&users)
+       if len(users) != 1 {
+               t.Errorf("Should found 1 users, but got %v", len(users))
+       }
+
+       // Empty IN slices are valid and must not produce an error.
+       if err := DB.Where("id IN (?)", []string{}).Find(&users).Error; err != nil {
+               t.Error("no error should happen when query with empty slice, but got: ", err)
+       }
+
+       if err := DB.Not("id IN (?)", []string{}).Find(&users).Error; err != nil {
+               t.Error("no error should happen when query with empty slice, but got: ", err)
+       }
+
+       // Find into a slice never reports RecordNotFound, even with no matches.
+       if DB.Where("name = ?", "none existing").Find(&[]User{}).RecordNotFound() {
+               t.Errorf("Should not get RecordNotFound error when looking for none existing records")
+       }
+}
+
+// TestSearchWithTwoDimensionalArray checks tuple/row-value conditions built
+// from a 2D array, with a dialect-specific formulation for MySQL/Postgres
+// ((col, col) IN (?)) versus MSSQL (JOIN against a VALUES table).
+func TestSearchWithTwoDimensionalArray(t *testing.T) {
+       var users []User
+       user1 := User{Name: "2DSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "2DSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "2DSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Create(&user1)
+       DB.Create(&user2)
+       DB.Create(&user3)
+
+       if dialect := DB.Dialect().GetName(); dialect == "mysql" || dialect == "postgres" {
+               // The length check must run on success; previously it was nested
+               // inside the error branch and therefore never executed.
+               if err := DB.Where("(name, age) IN (?)", [][]interface{}{{"2DSearchUser1", 1}, {"2DSearchUser2", 10}}).Find(&users).Error; err != nil {
+                       t.Errorf("No error should happen when query with 2D array, but got %v", err)
+               } else if len(users) != 2 {
+                       t.Errorf("Should find 2 users with 2D array, but got %v", len(users))
+               }
+       }
+
+       if dialect := DB.Dialect().GetName(); dialect == "mssql" {
+               if err := DB.Joins("JOIN (VALUES ?) AS x (col1, col2) ON x.col1 = name AND x.col2 = age", [][]interface{}{{"2DSearchUser1", 1}, {"2DSearchUser2", 10}}).Find(&users).Error; err != nil {
+                       t.Errorf("No error should happen when query with 2D array, but got %v", err)
+               } else if len(users) != 2 {
+                       t.Errorf("Should find 2 users with 2D array, but got %v", len(users))
+               }
+       }
+}
+
+// TestSearchWithStruct covers struct-based conditions: primary-key lookups
+// (inline, as a Where argument, and as a string), slices of primary keys,
+// and First/Find with struct and pointer-to-struct filters.
+func TestSearchWithStruct(t *testing.T) {
+       user1 := User{Name: "StructSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "StructSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "StructSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       if DB.Where(user1.Id).First(&User{}).RecordNotFound() {
+               t.Errorf("Search with primary key")
+       }
+
+       if DB.First(&User{}, user1.Id).RecordNotFound() {
+               t.Errorf("Search with primary key as inline condition")
+       }
+
+       // A numeric primary key formatted as a string must also match.
+       if DB.First(&User{}, fmt.Sprintf("%v", user1.Id)).RecordNotFound() {
+               t.Errorf("Search with primary key as inline condition")
+       }
+
+       var users []User
+       DB.Where([]int64{user1.Id, user2.Id, user3.Id}).Find(&users)
+       if len(users) != 3 {
+               t.Errorf("Should found 3 users when search with primary keys, but got %v", len(users))
+       }
+
+       var user User
+       DB.First(&user, &User{Name: user1.Name})
+       if user.Id == 0 || user.Name != user1.Name {
+               t.Errorf("Search first record with inline pointer of struct")
+       }
+
+       DB.First(&user, User{Name: user1.Name})
+       if user.Id == 0 || user.Name != user1.Name {
+               t.Errorf("Search first record with inline struct")
+       }
+
+       DB.Where(&User{Name: user1.Name}).First(&user)
+       if user.Id == 0 || user.Name != user1.Name {
+               t.Errorf("Search first record with where struct")
+       }
+
+       DB.Find(&users, &User{Name: user2.Name})
+       if len(users) != 1 {
+               t.Errorf("Search all records with inline struct")
+       }
+}
+
+func TestSearchWithMap(t *testing.T) {
+       companyID := 1
+       user1 := User{Name: "MapSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "MapSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "MapSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       user4 := User{Name: "MapSearchUser4", Age: 30, Birthday: parseTime("2020-1-1"), CompanyID: &companyID}
+       DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4)
+
+       var user User
+       DB.First(&user, map[string]interface{}{"name": user1.Name})
+       if user.Id == 0 || user.Name != user1.Name {
+               t.Errorf("Search first record with inline map")
+       }
+
+       user = User{}
+       DB.Where(map[string]interface{}{"name": user2.Name}).First(&user)
+       if user.Id == 0 || user.Name != user2.Name {
+               t.Errorf("Search first record with where map")
+       }
+
+       var users []User
+       DB.Where(map[string]interface{}{"name": user3.Name}).Find(&users)
+       if len(users) != 1 {
+               t.Errorf("Search all records with inline map")
+       }
+
+       DB.Find(&users, map[string]interface{}{"name": user3.Name})
+       if len(users) != 1 {
+               t.Errorf("Search all records with inline map")
+       }
+
+       DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": nil})
+       if len(users) != 0 {
+               t.Errorf("Search all records with inline map containing null value finding 0 records")
+       }
+
+       DB.Find(&users, map[string]interface{}{"name": user1.Name, "company_id": nil})
+       if len(users) != 1 {
+               t.Errorf("Search all records with inline map containing null value finding 1 record")
+       }
+
+       DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": companyID})
+       if len(users) != 1 {
+               t.Errorf("Search all records with inline multiple value map")
+       }
+}
+
+func TestSearchWithEmptyChain(t *testing.T) {
+       user1 := User{Name: "ChainSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")}
+       user2 := User{Name: "ChainearchUser2", Age: 10, Birthday: parseTime("2010-1-1")}
+       user3 := User{Name: "ChainearchUser3", Age: 20, Birthday: parseTime("2020-1-1")}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       if DB.Where("").Where("").First(&User{}).Error != nil {
+               t.Errorf("Should not raise any error if searching with empty strings")
+       }
+
+       if DB.Where(&User{}).Where("name = ?", user1.Name).First(&User{}).Error != nil {
+               t.Errorf("Should not raise any error if searching with empty struct")
+       }
+
+       if DB.Where(map[string]interface{}{}).Where("name = ?", user1.Name).First(&User{}).Error != nil {
+               t.Errorf("Should not raise any error if searching with empty map")
+       }
+}
+
+func TestSelect(t *testing.T) {
+       user1 := User{Name: "SelectUser1"}
+       DB.Save(&user1)
+
+       var user User
+       DB.Where("name = ?", user1.Name).Select("name").Find(&user)
+       if user.Id != 0 {
+               t.Errorf("Should not have ID because only selected name, %+v", user.Id)
+       }
+
+       if user.Name != user1.Name {
+               t.Errorf("Should have user Name when selected it")
+       }
+}
+
// TestOrderAndPluck exercises Order — including raw SQL expressions and
// reordering via Order(..., true) — combined with Pluck, all on one shared
// pre-filtered chain. NOTE: the assertions rely on reusing `scopedb` in
// sequence, so statement order matters.
func TestOrderAndPluck(t *testing.T) {
	user1 := User{Name: "OrderPluckUser1", Age: 1}
	user2 := User{Name: "OrderPluckUser2", Age: 10}
	user3 := User{Name: "OrderPluckUser3", Age: 20}
	DB.Save(&user1).Save(&user2).Save(&user3)
	// Base chain scoped to this test's rows; every query below builds on it.
	scopedb := DB.Model(&User{}).Where("name like ?", "%OrderPluckUser%")

	// A gorm.Expr order clause (CASE expression) should sort user2 first.
	var user User
	scopedb.Order(gorm.Expr("case when name = ? then 0 else 1 end", "OrderPluckUser2")).First(&user)
	if user.Name != "OrderPluckUser2" {
		t.Errorf("Order with sql expression")
	}

	var ages []int64
	scopedb.Order("age desc").Pluck("age", &ages)
	if ages[0] != 20 {
		t.Errorf("The first age should be 20 when order with age desc")
	}

	// Two plucks on the same chain share the first Order clause.
	var ages1, ages2 []int64
	scopedb.Order("age desc").Pluck("age", &ages1).Pluck("age", &ages2)
	if !reflect.DeepEqual(ages1, ages2) {
		t.Errorf("The first order is the primary order")
	}

	// Order(..., true) replaces (reorders) the earlier clause instead of appending.
	var ages3, ages4 []int64
	scopedb.Model(&User{}).Order("age desc").Pluck("age", &ages3).Order("age", true).Pluck("age", &ages4)
	if reflect.DeepEqual(ages3, ages4) {
		t.Errorf("Reorder should work")
	}

	// Multiple Order calls stack: primary sort by name, then age desc.
	var names []string
	var ages5 []int64
	scopedb.Model(User{}).Order("name").Order("age desc").Pluck("age", &ages5).Pluck("name", &names)
	if names != nil && ages5 != nil {
		if !(names[0] == user1.Name && names[1] == user2.Name && names[2] == user3.Name && ages5[2] == 20) {
			t.Errorf("Order with multiple orders")
		}
	} else {
		t.Errorf("Order with multiple orders")
	}

	// An empty Order string must be ignored, not emitted as invalid SQL.
	var ages6 []int64
	if err := scopedb.Order("").Pluck("age", &ages6).Error; err != nil {
		t.Errorf("An empty string as order clause produces invalid queries")
	}

	DB.Model(User{}).Select("name, age").Find(&[]User{})
}
+
+func TestLimit(t *testing.T) {
+       user1 := User{Name: "LimitUser1", Age: 1}
+       user2 := User{Name: "LimitUser2", Age: 10}
+       user3 := User{Name: "LimitUser3", Age: 20}
+       user4 := User{Name: "LimitUser4", Age: 10}
+       user5 := User{Name: "LimitUser5", Age: 20}
+       DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4).Save(&user5)
+
+       var users1, users2, users3 []User
+       DB.Order("age desc").Limit(3).Find(&users1).Limit(5).Find(&users2).Limit(-1).Find(&users3)
+
+       if len(users1) != 3 || len(users2) != 5 || len(users3) <= 5 {
+               t.Errorf("Limit should works")
+       }
+}
+
+func TestOffset(t *testing.T) {
+       for i := 0; i < 20; i++ {
+               DB.Save(&User{Name: fmt.Sprintf("OffsetUser%v", i)})
+       }
+       var users1, users2, users3, users4 []User
+       DB.Limit(100).Where("name like ?", "OffsetUser%").Order("age desc").Find(&users1).Offset(3).Find(&users2).Offset(5).Find(&users3).Offset(-1).Find(&users4)
+
+       if (len(users1) != len(users4)) || (len(users1)-len(users2) != 3) || (len(users1)-len(users3) != 5) {
+               t.Errorf("Offset should work")
+       }
+}
+
+func TestOr(t *testing.T) {
+       user1 := User{Name: "OrUser1", Age: 1}
+       user2 := User{Name: "OrUser2", Age: 10}
+       user3 := User{Name: "OrUser3", Age: 20}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       var users []User
+       DB.Where("name = ?", user1.Name).Or("name = ?", user2.Name).Find(&users)
+       if len(users) != 2 {
+               t.Errorf("Find users with or")
+       }
+}
+
+func TestCount(t *testing.T) {
+       user1 := User{Name: "CountUser1", Age: 1}
+       user2 := User{Name: "CountUser2", Age: 10}
+       user3 := User{Name: "CountUser3", Age: 20}
+
+       DB.Save(&user1).Save(&user2).Save(&user3)
+       var count, count1, count2 int64
+       var users []User
+
+       if err := DB.Where("name = ?", user1.Name).Or("name = ?", user3.Name).Find(&users).Count(&count).Error; err != nil {
+               t.Errorf(fmt.Sprintf("Count should work, but got err %v", err))
+       }
+
+       if count != int64(len(users)) {
+               t.Errorf("Count() method should get correct value")
+       }
+
+       DB.Model(&User{}).Where("name = ?", user1.Name).Count(&count1).Or("name in (?)", []string{user2.Name, user3.Name}).Count(&count2)
+       if count1 != 1 || count2 != 3 {
+               t.Errorf("Multiple count in chain")
+       }
+
+       var count3 int
+       if err := DB.Model(&User{}).Where("name in (?)", []string{user2.Name, user2.Name, user3.Name}).Group("id").Count(&count3).Error; err != nil {
+               t.Errorf("Not error should happen, but got %v", err)
+       }
+
+       if count3 != 2 {
+               t.Errorf("Should get correct count, but got %v", count3)
+       }
+}
+
// TestNot covers the accepted forms of Not conditions: bare primary key,
// empty slice, column/value pair, raw SQL fragment, struct, map (including
// nil values), and column/slice (NOT IN). NOTE: later assertions compare
// against counts gathered earlier, so statement order matters.
func TestNot(t *testing.T) {
	DB.Create(getPreparedUser("user1", "not"))
	DB.Create(getPreparedUser("user2", "not"))
	DB.Create(getPreparedUser("user3", "not"))

	// user4 is created without a company on purpose (exercised further down).
	user4 := getPreparedUser("user4", "not")
	user4.Company = Company{}
	DB.Create(user4)

	// Deliberately shadows the package-level DB with a chain pre-filtered
	// to this test's rows (role = "not").
	DB := DB.Where("role = ?", "not")

	var users1, users2, users3, users4, users5, users6, users7, users8, users9 []User
	if DB.Find(&users1).RowsAffected != 4 {
		t.Errorf("should find 4 not users")
	}
	// Not with a bare primary key excludes exactly that record.
	DB.Not(users1[0].Id).Find(&users2)

	if len(users1)-len(users2) != 1 {
		t.Errorf("Should ignore the first users with Not")
	}

	// An empty slice must act as "no condition", not as NOT IN ().
	DB.Not([]int{}).Find(&users3)
	if len(users1)-len(users3) != 0 {
		t.Errorf("Should find all users with a blank condition")
	}

	var name3Count int64
	DB.Table("users").Where("name = ?", "user3").Count(&name3Count)
	// Column/value pair form.
	DB.Not("name", "user3").Find(&users4)
	if len(users1)-len(users4) != int(name3Count) {
		t.Errorf("Should find all users' name not equal 3")
	}

	// Raw SQL fragment form.
	DB.Not("name = ?", "user3").Find(&users4)
	if len(users1)-len(users4) != int(name3Count) {
		t.Errorf("Should find all users' name not equal 3")
	}

	// Double negation: NOT (name <> 'user3') keeps only user3.
	DB.Not("name <> ?", "user3").Find(&users4)
	if len(users4) != int(name3Count) {
		t.Errorf("Should find all users' name not equal 3")
	}

	// Struct form.
	DB.Not(User{Name: "user3"}).Find(&users5)

	if len(users1)-len(users5) != int(name3Count) {
		t.Errorf("Should find all users' name not equal 3")
	}

	// Map form.
	DB.Not(map[string]interface{}{"name": "user3"}).Find(&users6)
	if len(users1)-len(users6) != int(name3Count) {
		t.Errorf("Should find all users' name not equal 3")
	}

	// Map with a nil value: excludes both user3 and the company-less user4
	// (see the inline note on the assertion).
	DB.Not(map[string]interface{}{"name": "user3", "company_id": nil}).Find(&users7)
	if len(users1)-len(users7) != 2 { // not user3 or user4
		t.Errorf("Should find all user's name not equal to 3 who do not have company id")
	}

	// Column + slice form (NOT IN) with one element...
	DB.Not("name", []string{"user3"}).Find(&users8)
	if len(users1)-len(users8) != int(name3Count) {
		t.Errorf("Should find all users' name not equal 3")
	}

	// ...and with two elements.
	var name2Count int64
	DB.Table("users").Where("name = ?", "user2").Count(&name2Count)
	DB.Not("name", []string{"user3", "user2"}).Find(&users9)
	if len(users1)-len(users9) != (int(name3Count) + int(name2Count)) {
		t.Errorf("Should find all users' name not equal 3")
	}
}
+
+func TestFillSmallerStruct(t *testing.T) {
+       user1 := User{Name: "SmallerUser", Age: 100}
+       DB.Save(&user1)
+       type SimpleUser struct {
+               Name      string
+               Id        int64
+               UpdatedAt time.Time
+               CreatedAt time.Time
+       }
+
+       var simpleUser SimpleUser
+       DB.Table("users").Where("name = ?", user1.Name).First(&simpleUser)
+
+       if simpleUser.Id == 0 || simpleUser.Name == "" {
+               t.Errorf("Should fill data correctly into smaller struct")
+       }
+}
+
// TestFindOrInitialize covers FirstOrInit: initializing from search values
// (struct pointer, struct, map), merging Attrs/Assign when no record exists,
// and — once a record exists — Attrs being ignored while Assign still applies.
func TestFindOrInitialize(t *testing.T) {
	var user1, user2, user3, user4, user5, user6 User
	DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user1)
	if user1.Name != "find or init" || user1.Id != 0 || user1.Age != 33 {
		t.Errorf("user should be initialized with search value")
	}

	DB.Where(User{Name: "find or init", Age: 33}).FirstOrInit(&user2)
	if user2.Name != "find or init" || user2.Id != 0 || user2.Age != 33 {
		t.Errorf("user should be initialized with search value")
	}

	DB.FirstOrInit(&user3, map[string]interface{}{"name": "find or init 2"})
	if user3.Name != "find or init 2" || user3.Id != 0 {
		t.Errorf("user should be initialized with inline search value")
	}

	// Attrs contribute values when the record does not exist yet.
	DB.Where(&User{Name: "find or init"}).Attrs(User{Age: 44}).FirstOrInit(&user4)
	if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 {
		t.Errorf("user should be initialized with search value and attrs")
	}

	DB.Where(&User{Name: "find or init"}).Assign("age", 44).FirstOrInit(&user4)
	if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 {
		t.Errorf("user should be initialized with search value and assign attrs")
	}

	// From here on the record exists, so FirstOrInit finds instead of
	// initializing: Attrs are ignored for found records...
	DB.Save(&User{Name: "find or init", Age: 33})
	DB.Where(&User{Name: "find or init"}).Attrs("age", 44).FirstOrInit(&user5)
	if user5.Name != "find or init" || user5.Id == 0 || user5.Age != 33 {
		t.Errorf("user should be found and not initialized by Attrs")
	}

	DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user6)
	if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 33 {
		t.Errorf("user should be found with FirstOrInit")
	}

	// ...while Assign still overwrites the found record's fields in memory.
	DB.Where(&User{Name: "find or init"}).Assign(User{Age: 44}).FirstOrInit(&user6)
	if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 44 {
		t.Errorf("user should be found and updated with assigned attrs")
	}
}
+
// TestFindOrCreate covers FirstOrCreate: creation from search values, reuse
// of an existing row, Attrs (create-only), Assign (always applied, persisted),
// and persistence of assigned associations. Assertions build on the rows
// created by earlier statements — keep the order intact.
func TestFindOrCreate(t *testing.T) {
	var user1, user2, user3, user4, user5, user6, user7, user8 User
	DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user1)
	if user1.Name != "find or create" || user1.Id == 0 || user1.Age != 33 {
		t.Errorf("user should be created with search value")
	}

	// A second identical FirstOrCreate must find the row created above.
	DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user2)
	if user1.Id != user2.Id || user2.Name != "find or create" || user2.Id == 0 || user2.Age != 33 {
		t.Errorf("user should be created with search value")
	}

	DB.FirstOrCreate(&user3, map[string]interface{}{"name": "find or create 2"})
	if user3.Name != "find or create 2" || user3.Id == 0 {
		t.Errorf("user should be created with inline search value")
	}

	// Attrs supply extra values only when the record has to be created.
	DB.Where(&User{Name: "find or create 3"}).Attrs("age", 44).FirstOrCreate(&user4)
	if user4.Name != "find or create 3" || user4.Id == 0 || user4.Age != 44 {
		t.Errorf("user should be created with search value and attrs")
	}

	// Assign updates even an existing row, which must touch UpdatedAt.
	updatedAt1 := user4.UpdatedAt
	DB.Where(&User{Name: "find or create 3"}).Assign("age", 55).FirstOrCreate(&user4)
	if updatedAt1.Format(time.RFC3339Nano) == user4.UpdatedAt.Format(time.RFC3339Nano) {
		t.Errorf("UpdateAt should be changed when update values with assign")
	}

	DB.Where(&User{Name: "find or create 4"}).Assign(User{Age: 44}).FirstOrCreate(&user4)
	if user4.Name != "find or create 4" || user4.Id == 0 || user4.Age != 44 {
		t.Errorf("user should be created with search value and assigned attrs")
	}

	// On an existing record, Attrs are ignored (FirstOrInit used here)...
	DB.Where(&User{Name: "find or create"}).Attrs("age", 44).FirstOrInit(&user5)
	if user5.Name != "find or create" || user5.Id == 0 || user5.Age != 33 {
		t.Errorf("user should be found and not initialized by Attrs")
	}

	// ...while Assign overwrites and persists.
	DB.Where(&User{Name: "find or create"}).Assign(User{Age: 44}).FirstOrCreate(&user6)
	if user6.Name != "find or create" || user6.Id == 0 || user6.Age != 44 {
		t.Errorf("user should be found and updated with assigned attrs")
	}

	DB.Where(&User{Name: "find or create"}).Find(&user7)
	if user7.Name != "find or create" || user7.Id == 0 || user7.Age != 44 {
		t.Errorf("user should be found and updated with assigned attrs")
	}

	// Assigned associations (credit card, emails) must be saved as well.
	DB.Where(&User{Name: "find or create embedded struct"}).Assign(User{Age: 44, CreditCard: CreditCard{Number: "1231231231"}, Emails: []Email{{Email: "jinzhu@assign_embedded_struct.com"}, {Email: "jinzhu-2@assign_embedded_struct.com"}}}).FirstOrCreate(&user8)
	if DB.Where("email = ?", "jinzhu-2@assign_embedded_struct.com").First(&Email{}).RecordNotFound() {
		t.Errorf("embedded struct email should be saved")
	}

	if DB.Where("email = ?", "1231231231").First(&CreditCard{}).RecordNotFound() {
		t.Errorf("embedded struct credit card should be saved")
	}
}
+
+func TestSelectWithEscapedFieldName(t *testing.T) {
+       user1 := User{Name: "EscapedFieldNameUser", Age: 1}
+       user2 := User{Name: "EscapedFieldNameUser", Age: 10}
+       user3 := User{Name: "EscapedFieldNameUser", Age: 20}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       var names []string
+       DB.Model(User{}).Where(&User{Name: "EscapedFieldNameUser"}).Pluck("\"name\"", &names)
+
+       if len(names) != 3 {
+               t.Errorf("Expected 3 name, but got: %d", len(names))
+       }
+}
+
+func TestSelectWithVariables(t *testing.T) {
+       DB.Save(&User{Name: "jinzhu"})
+
+       rows, _ := DB.Table("users").Select("? as fake", gorm.Expr("name")).Rows()
+
+       if !rows.Next() {
+               t.Errorf("Should have returned at least one row")
+       } else {
+               columns, _ := rows.Columns()
+               if !reflect.DeepEqual(columns, []string{"fake"}) {
+                       t.Errorf("Should only contains one column")
+               }
+       }
+
+       rows.Close()
+}
+
+func TestSelectWithArrayInput(t *testing.T) {
+       DB.Save(&User{Name: "jinzhu", Age: 42})
+
+       var user User
+       DB.Select([]string{"name", "age"}).Where("age = 42 AND name = 'jinzhu'").First(&user)
+
+       if user.Name != "jinzhu" || user.Age != 42 {
+               t.Errorf("Should have selected both age and name")
+       }
+}
+
// TestPluckWithSelect checks that Pluck can read a computed, aliased column
// produced by Select — with a bare alias and with a dialect-quoted alias.
func TestPluckWithSelect(t *testing.T) {
	var (
		user              = User{Name: "matematik7_pluck_with_select", Age: 25}
		combinedName      = fmt.Sprintf("%v%v", user.Name, user.Age)
		combineUserAgeSQL = fmt.Sprintf("concat(%v, %v)", DB.Dialect().Quote("name"), DB.Dialect().Quote("age"))
	)

	// sqlite has no concat() function; use the || operator instead.
	if dialect := DB.Dialect().GetName(); dialect == "sqlite3" {
		combineUserAgeSQL = fmt.Sprintf("(%v || %v)", DB.Dialect().Quote("name"), DB.Dialect().Quote("age"))
	}

	DB.Save(&user)

	// Plain alias.
	selectStr := combineUserAgeSQL + " as user_age"
	var userAges []string
	err := DB.Model(&User{}).Where("age = ?", 25).Select(selectStr).Pluck("user_age", &userAges).Error
	if err != nil {
		t.Error(err)
	}

	if len(userAges) != 1 || userAges[0] != combinedName {
		t.Errorf("Should correctly pluck with select, got: %s", userAges)
	}

	// Dialect-quoted alias.
	selectStr = combineUserAgeSQL + fmt.Sprintf(" as %v", DB.Dialect().Quote("user_age"))
	userAges = userAges[:0]
	err = DB.Model(&User{}).Where("age = ?", 25).Select(selectStr).Pluck("user_age", &userAges).Error
	if err != nil {
		t.Error(err)
	}

	if len(userAges) != 1 || userAges[0] != combinedName {
		t.Errorf("Should correctly pluck with select, got: %s", userAges)
	}
}
diff --git a/vendor/github.com/jinzhu/gorm/scaner_test.go b/vendor/github.com/jinzhu/gorm/scaner_test.go
new file mode 100755 (executable)
index 0000000..9e251dd
--- /dev/null
@@ -0,0 +1,139 @@
+package gorm_test
+
+import (
+       "database/sql/driver"
+       "encoding/json"
+       "errors"
+       "testing"
+
+       "github.com/jinzhu/gorm"
+)
+
// TestScannableSlices round-trips a model whose slice fields implement
// driver.Valuer / sql.Scanner: migrate, save, reload, and verify both the
// string slice and the struct slice survive serialization.
func TestScannableSlices(t *testing.T) {
	if err := DB.AutoMigrate(&RecordWithSlice{}).Error; err != nil {
		t.Errorf("Should create table with slice values correctly: %s", err)
	}

	r1 := RecordWithSlice{
		Strings: ExampleStringSlice{"a", "b", "c"},
		Structs: ExampleStructSlice{
			{"name1", "value1"},
			{"name2", "value2"},
		},
	}

	if err := DB.Save(&r1).Error; err != nil {
		t.Errorf("Should save record with slice values")
	}

	// Reload into a fresh value so the Scan path is exercised.
	var r2 RecordWithSlice

	if err := DB.Find(&r2).Error; err != nil {
		t.Errorf("Should fetch record with slice values")
	}

	if len(r2.Strings) != 3 || r2.Strings[0] != "a" || r2.Strings[1] != "b" || r2.Strings[2] != "c" {
		t.Errorf("Should have serialised and deserialised a string array")
	}

	if len(r2.Structs) != 2 || r2.Structs[0].Name != "name1" || r2.Structs[0].Value != "value1" || r2.Structs[1].Name != "name2" || r2.Structs[1].Value != "value2" {
		t.Errorf("Should have serialised and deserialised a struct array")
	}
}
+
// RecordWithSlice is the model used by TestScannableSlices; both slice
// fields are persisted in a single text column via their Valuer/Scanner
// implementations.
type RecordWithSlice struct {
	ID      uint64
	Strings ExampleStringSlice `sql:"type:text"`
	Structs ExampleStructSlice `sql:"type:text"`
}
+
// ExampleStringSlice is a string slice stored in a text column as JSON.
type ExampleStringSlice []string

// Value implements driver.Valuer by JSON-encoding the slice.
func (l ExampleStringSlice) Value() (driver.Value, error) {
	encoded, err := json.Marshal(l)
	return string(encoded), err
}

// Scan implements sql.Scanner, accepting string or []byte input.
func (l *ExampleStringSlice) Scan(input interface{}) error {
	if s, ok := input.(string); ok {
		return json.Unmarshal([]byte(s), l)
	}
	if b, ok := input.([]byte); ok {
		return json.Unmarshal(b, l)
	}
	return errors.New("not supported")
}
+
// ExampleStruct is a simple name/value pair used to exercise struct-slice
// serialization.
type ExampleStruct struct {
	Name  string
	Value string
}

// ExampleStructSlice is a struct slice stored in a text column as JSON.
type ExampleStructSlice []ExampleStruct

// Value implements driver.Valuer by JSON-encoding the slice.
func (l ExampleStructSlice) Value() (driver.Value, error) {
	encoded, err := json.Marshal(l)
	return string(encoded), err
}

// Scan implements sql.Scanner, accepting string or []byte input.
func (l *ExampleStructSlice) Scan(input interface{}) error {
	if s, ok := input.(string); ok {
		return json.Unmarshal([]byte(s), l)
	}
	if b, ok := input.([]byte); ok {
		return json.Unmarshal(b, l)
	}
	return errors.New("not supported")
}
+
// ScannerDataType is a no-op Valuer/Scanner whose inner field carries its
// own sql type tag; TestScannerDataType checks which type gorm picks for it.
type ScannerDataType struct {
	Street string `sql:"TYPE:varchar(24)"`
}

// Value implements driver.Valuer (intentionally a no-op for this test type).
func (ScannerDataType) Value() (driver.Value, error) {
	return nil, nil
}

// Scan implements sql.Scanner (intentionally a no-op for this test type).
func (*ScannerDataType) Scan(input interface{}) error {
	return nil
}
+
// ScannerDataTypeTestStruct tags the scanner field with its own sql type;
// TestScannerDataType expects this field-level tag ("json") to win over the
// tag declared inside ScannerDataType.
type ScannerDataTypeTestStruct struct {
	Field1          int
	ScannerDataType *ScannerDataType `sql:"TYPE:json"`
}
+
// ScannerDataType2 duplicates ScannerDataType so the untagged-field case can
// be tested independently (see ScannerDataTypeTestStruct2).
type ScannerDataType2 struct {
	Street string `sql:"TYPE:varchar(24)"`
}

// Value implements driver.Valuer (intentionally a no-op for this test type).
func (ScannerDataType2) Value() (driver.Value, error) {
	return nil, nil
}

// Scan implements sql.Scanner (intentionally a no-op for this test type).
func (*ScannerDataType2) Scan(input interface{}) error {
	return nil
}
+
// ScannerDataTypeTestStruct2 leaves the scanner field untagged; in that case
// TestScannerDataType expects the type declared inside ScannerDataType2
// ("varchar(24)") to be used.
type ScannerDataTypeTestStruct2 struct {
	Field1          int
	ScannerDataType *ScannerDataType2
}
+
// TestScannerDataType verifies which sql column type is chosen for Scanner
// fields: a field-level sql tag overrides the tag declared inside the
// scanner type; without one, the scanner type's own tag is used.
func TestScannerDataType(t *testing.T) {
	scope := gorm.Scope{Value: &ScannerDataTypeTestStruct{}}
	if field, ok := scope.FieldByName("ScannerDataType"); ok {
		if DB.Dialect().DataTypeOf(field.StructField) != "json" {
			t.Errorf("data type for scanner is wrong")
		}
	}

	scope = gorm.Scope{Value: &ScannerDataTypeTestStruct2{}}
	if field, ok := scope.FieldByName("ScannerDataType"); ok {
		if DB.Dialect().DataTypeOf(field.StructField) != "varchar(24)" {
			t.Errorf("data type for scanner is wrong")
		}
	}
}
diff --git a/vendor/github.com/jinzhu/gorm/scope.go b/vendor/github.com/jinzhu/gorm/scope.go
new file mode 100755 (executable)
index 0000000..806ccb7
--- /dev/null
@@ -0,0 +1,1397 @@
+package gorm
+
+import (
+       "bytes"
+       "database/sql"
+       "database/sql/driver"
+       "errors"
+       "fmt"
+       "reflect"
+       "regexp"
+       "strings"
+       "time"
+)
+
// Scope contain current operation's information when you perform any operation on the database
type Scope struct {
	Search          *search       // accumulated query conditions for this operation
	Value           interface{}   // the model value (struct or slice) being operated on
	SQL             string        // generated SQL statement
	SQLVars         []interface{} // bind variables for SQL
	db              *DB           // owning DB handle (connection, dialect, settings)
	instanceID      string        // presumably keys InstanceGet/InstanceSet settings — confirm
	primaryKeyField *Field        // cached primary key field, if resolved
	skipLeft        bool          // when true, remaining callbacks are skipped (see SkipLeft)
	fields          *[]*Field     // lazily-built cache used by Fields()
	selectAttrs     *[]string     // cached select/omit attributes
}
+
// IndirectValue return scope's reflect value's indirect value,
// dereferencing pointers so callers always see the underlying struct/slice.
func (scope *Scope) IndirectValue() reflect.Value {
	return indirect(reflect.ValueOf(scope.Value))
}
+
// New create a new Scope without search information, sharing this scope's
// DB connection (via NewDB) but starting from a fresh, empty search.
func (scope *Scope) New(value interface{}) *Scope {
	return &Scope{db: scope.NewDB(), Search: &search{}, Value: value}
}
+
+////////////////////////////////////////////////////////////////////////////////
+// Scope DB
+////////////////////////////////////////////////////////////////////////////////
+
// DB return scope's DB connection as-is, including any search state it carries.
func (scope *Scope) DB() *DB {
	return scope.db
}
+
+// NewDB create a new DB without search information
+func (scope *Scope) NewDB() *DB {
+       if scope.db != nil {
+               db := scope.db.clone()
+               db.search = nil
+               db.Value = nil
+               return db
+       }
+       return nil
+}
+
// SQLDB return *sql.DB — more precisely, the underlying database handle as
// the SQLCommon interface.
func (scope *Scope) SQLDB() SQLCommon {
	return scope.db.db
}
+
// Dialect get dialect of the scope's database connection.
func (scope *Scope) Dialect() Dialect {
	return scope.db.dialect
}
+
+// Quote used to quote string to escape them for database
+func (scope *Scope) Quote(str string) string {
+       if strings.Contains(str, ".") {
+               newStrs := []string{}
+               for _, str := range strings.Split(str, ".") {
+                       newStrs = append(newStrs, scope.Dialect().Quote(str))
+               }
+               return strings.Join(newStrs, ".")
+       }
+
+       return scope.Dialect().Quote(str)
+}
+
// Err add error to Scope — recorded on the underlying DB (so HasError and
// db.Error see it) — and returns the same error for convenient chaining.
func (scope *Scope) Err(err error) error {
	if err != nil {
		scope.db.AddError(err)
	}
	return err
}
+
// HasError check if there are any error recorded on the scope's DB so far.
func (scope *Scope) HasError() bool {
	return scope.db.Error != nil
}
+
// Log print log message through the DB's configured logger.
func (scope *Scope) Log(v ...interface{}) {
	scope.db.log(v...)
}
+
// SkipLeft skip remaining callbacks by flagging the scope; the callback
// runner is expected to consult skipLeft.
func (scope *Scope) SkipLeft() {
	scope.skipLeft = true
}
+
// Fields get value's fields, lazily building and caching one *Field per
// struct field of the model.
func (scope *Scope) Fields() []*Field {
	if scope.fields == nil {
		var (
			fields             []*Field
			indirectScopeValue = scope.IndirectValue()
			isStruct           = indirectScopeValue.Kind() == reflect.Struct
		)

		for _, structField := range scope.GetModelStruct().StructFields {
			if isStruct {
				// Walk the (possibly embedded) field path, allocating nil
				// intermediate pointers so the leaf value is reachable.
				fieldValue := indirectScopeValue
				for _, name := range structField.Names {
					if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() {
						fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
					}
					fieldValue = reflect.Indirect(fieldValue).FieldByName(name)
				}
				fields = append(fields, &Field{StructField: structField, Field: fieldValue, IsBlank: isBlank(fieldValue)})
			} else {
				// Non-struct value (e.g. a slice): carry metadata only.
				fields = append(fields, &Field{StructField: structField, IsBlank: true})
			}
		}
		scope.fields = &fields
	}

	return *scope.fields
}
+
+// FieldByName find `gorm.Field` with field name or db name
+func (scope *Scope) FieldByName(name string) (field *Field, ok bool) {
+       var (
+               dbName           = ToColumnName(name)
+               mostMatchedField *Field
+       )
+
+       for _, field := range scope.Fields() {
+               if field.Name == name || field.DBName == name {
+                       return field, true
+               }
+               if field.DBName == dbName {
+                       mostMatchedField = field
+               }
+       }
+       return mostMatchedField, mostMatchedField != nil
+}
+
+// PrimaryFields return scope's primary fields
+func (scope *Scope) PrimaryFields() (fields []*Field) {
+       for _, field := range scope.Fields() {
+               if field.IsPrimaryKey {
+                       fields = append(fields, field)
+               }
+       }
+       return fields
+}
+
+// PrimaryField return scope's main primary field, if defined more that one primary fields, will return the one having column name `id` or the first one
+func (scope *Scope) PrimaryField() *Field {
+       if primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 {
+               if len(primaryFields) > 1 {
+                       if field, ok := scope.FieldByName("id"); ok {
+                               return field
+                       }
+               }
+               return scope.PrimaryFields()[0]
+       }
+       return nil
+}
+
+// PrimaryKey get main primary field's db name
+func (scope *Scope) PrimaryKey() string {
+       if field := scope.PrimaryField(); field != nil {
+               return field.DBName
+       }
+       return ""
+}
+
// PrimaryKeyZero check main primary field's value is blank or not; a model
// without a primary field also counts as blank.
func (scope *Scope) PrimaryKeyZero() bool {
	field := scope.PrimaryField()
	return field == nil || field.IsBlank
}
+
// PrimaryKeyValue get the primary key's value; falls back to 0 when there is
// no primary field or its reflect value is invalid.
func (scope *Scope) PrimaryKeyValue() interface{} {
	if field := scope.PrimaryField(); field != nil && field.Field.IsValid() {
		return field.Field.Interface()
	}
	return 0
}
+
+// HasColumn to check if has column
+func (scope *Scope) HasColumn(column string) bool {
+       for _, field := range scope.GetStructFields() {
+               if field.IsNormal && (field.Name == column || field.DBName == column) {
+                       return true
+               }
+       }
+       return false
+}
+
+// SetColumn to set the column's value, column could be field or field's name/dbname
+func (scope *Scope) SetColumn(column interface{}, value interface{}) error {
+	var updateAttrs = map[string]interface{}{}
+	if attrs, ok := scope.InstanceGet("gorm:update_attrs"); ok {
+		// Record the assignment in the pending update map so update callbacks see it.
+		updateAttrs = attrs.(map[string]interface{})
+		defer scope.InstanceSet("gorm:update_attrs", updateAttrs)
+	}
+
+	if field, ok := column.(*Field); ok {
+		updateAttrs[field.DBName] = value
+		return field.Set(value)
+	} else if name, ok := column.(string); ok {
+		var (
+			dbName           = ToDBName(name)
+			mostMatchedField *Field
+		)
+		for _, field := range scope.Fields() {
+			// NOTE(review): comparing DBName against `value` (not `name`/`dbName`)
+			// looks suspicious — presumably it should match the column name; confirm
+			// against upstream before changing, as callers may rely on this behavior.
+			if field.DBName == value {
+				updateAttrs[field.DBName] = value
+				return field.Set(value)
+			}
+			// Exact db-name match wins; otherwise fall back to the first Go-name match.
+			if (field.DBName == dbName) || (field.Name == name && mostMatchedField == nil) {
+				mostMatchedField = field
+			}
+		}
+
+		if mostMatchedField != nil {
+			updateAttrs[mostMatchedField.DBName] = value
+			return mostMatchedField.Set(value)
+		}
+	}
+	return errors.New("could not convert column to field")
+}
+
+// CallMethod call scope value's method, if it is a slice, will call its element's method one by one
+func (scope *Scope) CallMethod(methodName string) {
+       if scope.Value == nil {
+               return
+       }
+
+       if indirectScopeValue := scope.IndirectValue(); indirectScopeValue.Kind() == reflect.Slice {
+               for i := 0; i < indirectScopeValue.Len(); i++ {
+                       scope.callMethod(methodName, indirectScopeValue.Index(i))
+               }
+       } else {
+               scope.callMethod(methodName, indirectScopeValue)
+       }
+}
+
+// AddToVars add value as sql's vars, used to prevent SQL injection
+func (scope *Scope) AddToVars(value interface{}) string {
+	_, skipBindVar := scope.InstanceGet("skip_bindvar")
+
+	if expr, ok := value.(*expr); ok {
+		// Raw expressions keep their own SQL text; only their args become bind vars.
+		exp := expr.expr
+		for _, arg := range expr.args {
+			if skipBindVar {
+				// Collect the arg but leave the literal "?" placeholders untouched.
+				scope.AddToVars(arg)
+			} else {
+				// Replace one "?" per argument, left to right.
+				exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1)
+			}
+		}
+		return exp
+	}
+
+	scope.SQLVars = append(scope.SQLVars, value)
+
+	if skipBindVar {
+		return "?"
+	}
+	// Dialect decides placeholder syntax (e.g. "?" for MySQL, "$1" for Postgres).
+	return scope.Dialect().BindVar(len(scope.SQLVars))
+}
+
+// SelectAttrs return selected attributes
+func (scope *Scope) SelectAttrs() []string {
+	if scope.selectAttrs != nil {
+		return *scope.selectAttrs
+	}
+	// Flatten the select clauses into a plain string list and cache the result.
+	attrs := []string{}
+	for _, value := range scope.Search.selects {
+		switch v := value.(type) {
+		case string:
+			attrs = append(attrs, v)
+		case []string:
+			attrs = append(attrs, v...)
+		case []interface{}:
+			for _, attr := range v {
+				attrs = append(attrs, fmt.Sprintf("%v", attr))
+			}
+		}
+	}
+	scope.selectAttrs = &attrs
+	return *scope.selectAttrs
+}
+
+// OmitAttrs return omitted attributes
+func (scope *Scope) OmitAttrs() []string {
+       return scope.Search.omits
+}
+
+// tabler is implemented by models that supply a fixed table name.
+type tabler interface {
+	TableName() string
+}
+
+// dbTabler is implemented by models that derive their table name from the current *DB.
+type dbTabler interface {
+	TableName(*DB) string
+}
+
+// TableName return table name
+func (scope *Scope) TableName() string {
+	// An explicit Table(...) call takes priority over everything else.
+	if scope.Search != nil && len(scope.Search.tableName) > 0 {
+		return scope.Search.tableName
+	}
+
+	// Next, let the model name itself via the tabler interfaces.
+	switch t := scope.Value.(type) {
+	case tabler:
+		return t.TableName()
+	case dbTabler:
+		return t.TableName(scope.db)
+	}
+
+	// Fall back to the name derived from the struct type.
+	return scope.GetModelStruct().TableName(scope.db.Model(scope.Value))
+}
+
+// QuotedTableName return quoted table name
+func (scope *Scope) QuotedTableName() (name string) {
+       if scope.Search != nil && len(scope.Search.tableName) > 0 {
+               if strings.Contains(scope.Search.tableName, " ") {
+                       return scope.Search.tableName
+               }
+               return scope.Quote(scope.Search.tableName)
+       }
+
+       return scope.Quote(scope.TableName())
+}
+
+// CombinedConditionSql return combined condition sql
+func (scope *Scope) CombinedConditionSql() string {
+	joinSQL := scope.joinsSQL()
+	whereSQL := scope.whereSQL()
+	if scope.Search.raw {
+		// For raw queries the caller supplies the surrounding WHERE, so strip
+		// the prefix and wrapping parentheses that whereSQL added.
+		whereSQL = strings.TrimSuffix(strings.TrimPrefix(whereSQL, "WHERE ("), ")")
+	}
+	return joinSQL + whereSQL + scope.groupSQL() +
+		scope.havingSQL() + scope.orderSQL() + scope.limitAndOffsetSQL()
+}
+
+// Raw set raw sql
+func (scope *Scope) Raw(sql string) *Scope {
+       scope.SQL = strings.Replace(sql, "$$$", "?", -1)
+       return scope
+}
+
+// Exec perform generated SQL
+func (scope *Scope) Exec() *Scope {
+	// Log the SQL and its timing once the statement finishes.
+	defer scope.trace(NowFunc())
+
+	if !scope.HasError() {
+		if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
+			// Record the affected-row count on the owning *DB.
+			if count, err := result.RowsAffected(); scope.Err(err) == nil {
+				scope.db.RowsAffected = count
+			}
+		}
+	}
+	return scope
+}
+
+// Set set value by name
+func (scope *Scope) Set(name string, value interface{}) *Scope {
+       scope.db.InstantSet(name, value)
+       return scope
+}
+
+// Get get setting by name
+func (scope *Scope) Get(name string) (interface{}, bool) {
+       return scope.db.Get(name)
+}
+
+// InstanceID get InstanceID for scope
+func (scope *Scope) InstanceID() string {
+	if scope.instanceID == "" {
+		// NOTE(review): formats the addresses of the local `scope` variable and the
+		// db field to build a per-operation key; memoized so it stays stable for
+		// this Scope's lifetime even though the local's address is incidental.
+		scope.instanceID = fmt.Sprintf("%v%v", &scope, &scope.db)
+	}
+	return scope.instanceID
+}
+
+// InstanceSet set instance setting for current operation, but not for operations in callbacks, like saving associations callback
+func (scope *Scope) InstanceSet(name string, value interface{}) *Scope {
+       return scope.Set(name+scope.InstanceID(), value)
+}
+
+// InstanceGet get instance setting from current operation
+func (scope *Scope) InstanceGet(name string) (interface{}, bool) {
+       return scope.Get(name + scope.InstanceID())
+}
+
+// Begin start a transaction
+func (scope *Scope) Begin() *Scope {
+	// Only a plain connection (sqlDb) can open a transaction; if we are already
+	// inside one, this is a no-op.
+	if db, ok := scope.SQLDB().(sqlDb); ok {
+		// NOTE(review): a Begin error is silently ignored here; callers simply
+		// continue on the non-transactional connection.
+		if tx, err := db.Begin(); err == nil {
+			scope.db.db = interface{}(tx).(SQLCommon)
+			// Mark the scope so CommitOrRollback knows this scope owns the tx.
+			scope.InstanceSet("gorm:started_transaction", true)
+		}
+	}
+	return scope
+}
+
+// CommitOrRollback commit current transaction if no error happened, otherwise will rollback it
+func (scope *Scope) CommitOrRollback() *Scope {
+	// Only act if Begin() on this very scope started the transaction.
+	if _, ok := scope.InstanceGet("gorm:started_transaction"); ok {
+		if db, ok := scope.db.db.(sqlTx); ok {
+			if scope.HasError() {
+				// NOTE(review): the Rollback error is intentionally dropped; the
+				// original scope error is the one surfaced to the caller.
+				db.Rollback()
+			} else {
+				scope.Err(db.Commit())
+			}
+			// Restore the non-transactional connection.
+			scope.db.db = scope.db.parent.db
+		}
+	}
+	return scope
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Private Methods For *gorm.Scope
+////////////////////////////////////////////////////////////////////////////////
+
+// callMethod looks up methodName on reflectValue and invokes it with whichever
+// of the supported hook signatures it has, recording any error on the scope.
+func (scope *Scope) callMethod(methodName string, reflectValue reflect.Value) {
+	// Only get address from non-pointer
+	if reflectValue.CanAddr() && reflectValue.Kind() != reflect.Ptr {
+		reflectValue = reflectValue.Addr()
+	}
+
+	if methodValue := reflectValue.MethodByName(methodName); methodValue.IsValid() {
+		switch method := methodValue.Interface().(type) {
+		case func():
+			method()
+		case func(*Scope):
+			method(scope)
+		case func(*DB):
+			// Run the hook against a fresh session and propagate its error.
+			newDB := scope.NewDB()
+			method(newDB)
+			scope.Err(newDB.Error)
+		case func() error:
+			scope.Err(method())
+		case func(*Scope) error:
+			scope.Err(method(scope))
+		case func(*DB) error:
+			newDB := scope.NewDB()
+			scope.Err(method(newDB))
+			scope.Err(newDB.Error)
+		default:
+			// A method with any other signature is a programming error.
+			scope.Err(fmt.Errorf("unsupported function %v", methodName))
+		}
+	}
+}
+
+// Package-level regexps, compiled once (hot-path SQL building uses them).
+var (
+	columnRegexp        = regexp.MustCompile("^[a-zA-Z\\d]+(\\.[a-zA-Z\\d]+)*$") // only match string like `name`, `users.name`
+	isNumberRegexp      = regexp.MustCompile("^\\s*\\d+\\s*$")                   // match if string is number
+	comparisonRegexp    = regexp.MustCompile("(?i) (=|<>|(>|<)(=?)|LIKE|IS|IN) ") // detects a comparison operator inside a condition string
+	countingQueryRegexp = regexp.MustCompile("(?i)^count(.+)$")                   // matches a select clause that is already a count(...)
+)
+
+func (scope *Scope) quoteIfPossible(str string) string {
+       if columnRegexp.MatchString(str) {
+               return scope.Quote(str)
+       }
+       return str
+}
+
+// scan maps one sql row into the given fields, matching result columns to
+// fields by DB name. A column name appearing more than once is bound to
+// successive matching fields rather than the same field twice.
+func (scope *Scope) scan(rows *sql.Rows, columns []string, fields []*Field) {
+	var (
+		ignored            interface{}
+		values             = make([]interface{}, len(columns))
+		selectFields       []*Field
+		selectedColumnsMap = map[string]int{} // column name -> index of the last field it was bound to
+		resetFields        = map[int]*Field{} // value index -> non-pointer field to copy back after Scan
+	)
+
+	for index, column := range columns {
+		// Default target: a throwaway, in case no field matches this column.
+		values[index] = &ignored
+
+		selectFields = fields
+		offset := 0
+		if idx, ok := selectedColumnsMap[column]; ok {
+			// Column seen before: resume the search after the previously bound field.
+			offset = idx + 1
+			selectFields = selectFields[offset:]
+		}
+
+		for fieldIndex, field := range selectFields {
+			if field.DBName == column {
+				if field.Field.Kind() == reflect.Ptr {
+					values[index] = field.Field.Addr().Interface()
+				} else {
+					// Scan into a fresh **T so NULL can be detected, then copy back below.
+					reflectValue := reflect.New(reflect.PtrTo(field.Struct.Type))
+					reflectValue.Elem().Set(field.Field.Addr())
+					values[index] = reflectValue.Interface()
+					resetFields[index] = field
+				}
+
+				selectedColumnsMap[column] = offset + fieldIndex
+
+				// Normal (persisted) fields claim the column exclusively.
+				if field.IsNormal {
+					break
+				}
+			}
+		}
+	}
+
+	scope.Err(rows.Scan(values...))
+
+	// Copy scanned values back into non-pointer fields, skipping SQL NULLs.
+	for index, field := range resetFields {
+		if v := reflect.ValueOf(values[index]).Elem().Elem(); v.IsValid() {
+			field.Field.Set(v)
+		}
+	}
+}
+
+func (scope *Scope) primaryCondition(value interface{}) string {
+       return fmt.Sprintf("(%v.%v = %v)", scope.QuotedTableName(), scope.Quote(scope.PrimaryKey()), value)
+}
+
+// buildCondition turns one stored clause (query + args) into a SQL fragment.
+// include=false inverts the condition (used for Not(...) clauses).
+func (scope *Scope) buildCondition(clause map[string]interface{}, include bool) (str string) {
+	var (
+		quotedTableName  = scope.QuotedTableName()
+		quotedPrimaryKey = scope.Quote(scope.PrimaryKey())
+		equalSQL         = "="
+		inSQL            = "IN"
+	)
+
+	// If building not conditions
+	if !include {
+		equalSQL = "<>"
+		inSQL = "NOT IN"
+	}
+
+	switch value := clause["query"].(type) {
+	case sql.NullInt64:
+		// Bare numbers are treated as a primary-key lookup.
+		return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, value.Int64)
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+		return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, value)
+	case []int, []int8, []int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string, []interface{}:
+		// A slice becomes a primary-key IN (...) condition.
+		if !include && reflect.ValueOf(value).Len() == 0 {
+			return
+		}
+		str = fmt.Sprintf("(%v.%v %s (?))", quotedTableName, quotedPrimaryKey, inSQL)
+		clause["args"] = []interface{}{value}
+	case string:
+		if isNumberRegexp.MatchString(value) {
+			// Numeric string: again a primary-key lookup, bound as a var.
+			return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, scope.AddToVars(value))
+		}
+
+		if value != "" {
+			if !include {
+				if comparisonRegexp.MatchString(value) {
+					// Already a comparison expression: negate it as a whole.
+					str = fmt.Sprintf("NOT (%v)", value)
+				} else {
+					// Bare column name: negate membership.
+					str = fmt.Sprintf("(%v.%v NOT IN (?))", quotedTableName, scope.Quote(value))
+				}
+			} else {
+				str = fmt.Sprintf("(%v)", value)
+			}
+		}
+	case map[string]interface{}:
+		// Column -> value map; nil values become IS [NOT] NULL.
+		var sqls []string
+		for key, value := range value {
+			if value != nil {
+				sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", quotedTableName, scope.Quote(key), equalSQL, scope.AddToVars(value)))
+			} else {
+				if !include {
+					sqls = append(sqls, fmt.Sprintf("(%v.%v IS NOT NULL)", quotedTableName, scope.Quote(key)))
+				} else {
+					sqls = append(sqls, fmt.Sprintf("(%v.%v IS NULL)", quotedTableName, scope.Quote(key)))
+				}
+			}
+		}
+		return strings.Join(sqls, " AND ")
+	case interface{}:
+		// A struct value: compare every non-blank field against its value.
+		var sqls []string
+		newScope := scope.New(value)
+
+		if len(newScope.Fields()) == 0 {
+			scope.Err(fmt.Errorf("invalid query condition: %v", value))
+			return
+		}
+		scopeQuotedTableName := newScope.QuotedTableName()
+		for _, field := range newScope.Fields() {
+			if !field.IsIgnored && !field.IsBlank {
+				sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", scopeQuotedTableName, scope.Quote(field.DBName), equalSQL, scope.AddToVars(field.Field.Interface())))
+			}
+		}
+		return strings.Join(sqls, " AND ")
+	default:
+		scope.Err(fmt.Errorf("invalid query condition: %v", value))
+		return
+	}
+
+	// Expand the args into bind-var placeholders, one replacement per "?".
+	replacements := []string{}
+	args := clause["args"].([]interface{})
+	for _, arg := range args {
+		var err error
+		switch reflect.ValueOf(arg).Kind() {
+		case reflect.Slice: // For where("id in (?)", []int64{1,2})
+			if scanner, ok := interface{}(arg).(driver.Valuer); ok {
+				// A slice-kinded Valuer (e.g. custom types) binds as a single value.
+				arg, err = scanner.Value()
+				replacements = append(replacements, scope.AddToVars(arg))
+			} else if b, ok := arg.([]byte); ok {
+				// []byte is a blob, not a list.
+				replacements = append(replacements, scope.AddToVars(b))
+			} else if as, ok := arg.([][]interface{}); ok {
+				// Tuple lists: IN ((a,b),(c,d)).
+				var tempMarks []string
+				for _, a := range as {
+					var arrayMarks []string
+					for _, v := range a {
+						arrayMarks = append(arrayMarks, scope.AddToVars(v))
+					}
+
+					if len(arrayMarks) > 0 {
+						tempMarks = append(tempMarks, fmt.Sprintf("(%v)", strings.Join(arrayMarks, ",")))
+					}
+				}
+
+				if len(tempMarks) > 0 {
+					replacements = append(replacements, strings.Join(tempMarks, ","))
+				}
+			} else if values := reflect.ValueOf(arg); values.Len() > 0 {
+				// Ordinary non-empty slice: one placeholder per element.
+				var tempMarks []string
+				for i := 0; i < values.Len(); i++ {
+					tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface()))
+				}
+				replacements = append(replacements, strings.Join(tempMarks, ","))
+			} else {
+				// Empty slice: IN (NULL) so the condition matches nothing.
+				replacements = append(replacements, scope.AddToVars(Expr("NULL")))
+			}
+		default:
+			if valuer, ok := interface{}(arg).(driver.Valuer); ok {
+				arg, err = valuer.Value()
+			}
+
+			replacements = append(replacements, scope.AddToVars(arg))
+		}
+
+		if err != nil {
+			scope.Err(err)
+		}
+	}
+
+	// Substitute the replacements for "?" one by one; the bounds check keeps
+	// surplus "?" characters in the query from panicking.
+	buff := bytes.NewBuffer([]byte{})
+	i := 0
+	for _, s := range str {
+		if s == '?' && len(replacements) > i {
+			buff.WriteString(replacements[i])
+			i++
+		} else {
+			buff.WriteRune(s)
+		}
+	}
+
+	str = buff.String()
+
+	return
+}
+
+// buildSelectQuery renders the stored select clause (string or []string) and
+// substitutes its args for "?" placeholders as bind vars.
+func (scope *Scope) buildSelectQuery(clause map[string]interface{}) (str string) {
+	// Normalize the select clause into a single string.
+	switch value := clause["query"].(type) {
+	case string:
+		str = value
+	case []string:
+		str = strings.Join(value, ", ")
+	}
+
+	// Expand each argument into bind-var placeholders; slices become "?,?,...".
+	args := clause["args"].([]interface{})
+	replacements := []string{}
+	for _, arg := range args {
+		switch reflect.ValueOf(arg).Kind() {
+		case reflect.Slice:
+			values := reflect.ValueOf(arg)
+			var tempMarks []string
+			for i := 0; i < values.Len(); i++ {
+				tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface()))
+			}
+			replacements = append(replacements, strings.Join(tempMarks, ","))
+		default:
+			if valuer, ok := interface{}(arg).(driver.Valuer); ok {
+				arg, _ = valuer.Value()
+			}
+			replacements = append(replacements, scope.AddToVars(arg))
+		}
+	}
+
+	// Substitute replacements for "?" one by one. The bounds check mirrors
+	// buildCondition: a surplus "?" in the select clause is kept verbatim
+	// instead of panicking with an index-out-of-range.
+	buff := bytes.NewBuffer([]byte{})
+	i := 0
+	for _, char := range str {
+		if char == '?' && i < len(replacements) {
+			buff.WriteString(replacements[i])
+			i++
+		} else {
+			buff.WriteRune(char)
+		}
+	}
+
+	str = buff.String()
+
+	return
+}
+
+// whereSQL assembles the WHERE clause from soft-delete, primary-key, where,
+// or, and not conditions registered on the search.
+func (scope *Scope) whereSQL() (sql string) {
+	var (
+		quotedTableName                                = scope.QuotedTableName()
+		deletedAtField, hasDeletedAtField              = scope.FieldByName("DeletedAt")
+		primaryConditions, andConditions, orConditions []string
+	)
+
+	// Soft delete: hide rows with a non-NULL DeletedAt unless Unscoped.
+	if !scope.Search.Unscoped && hasDeletedAtField {
+		sql := fmt.Sprintf("%v.%v IS NULL", quotedTableName, scope.Quote(deletedAtField.DBName))
+		primaryConditions = append(primaryConditions, sql)
+	}
+
+	// A populated primary key pins the query to that specific row.
+	if !scope.PrimaryKeyZero() {
+		for _, field := range scope.PrimaryFields() {
+			sql := fmt.Sprintf("%v.%v = %v", quotedTableName, scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))
+			primaryConditions = append(primaryConditions, sql)
+		}
+	}
+
+	for _, clause := range scope.Search.whereConditions {
+		if sql := scope.buildCondition(clause, true); sql != "" {
+			andConditions = append(andConditions, sql)
+		}
+	}
+
+	for _, clause := range scope.Search.orConditions {
+		if sql := scope.buildCondition(clause, true); sql != "" {
+			orConditions = append(orConditions, sql)
+		}
+	}
+
+	// NOT conditions are built inverted (include=false) and joined with AND.
+	for _, clause := range scope.Search.notConditions {
+		if sql := scope.buildCondition(clause, false); sql != "" {
+			andConditions = append(andConditions, sql)
+		}
+	}
+
+	orSQL := strings.Join(orConditions, " OR ")
+	combinedSQL := strings.Join(andConditions, " AND ")
+	if len(combinedSQL) > 0 {
+		if len(orSQL) > 0 {
+			combinedSQL = combinedSQL + " OR " + orSQL
+		}
+	} else {
+		combinedSQL = orSQL
+	}
+
+	// Primary conditions always apply; the rest are parenthesized after them.
+	if len(primaryConditions) > 0 {
+		sql = "WHERE " + strings.Join(primaryConditions, " AND ")
+		if len(combinedSQL) > 0 {
+			sql = sql + " AND (" + combinedSQL + ")"
+		}
+	} else if len(combinedSQL) > 0 {
+		sql = "WHERE " + combinedSQL
+	}
+	return
+}
+
+func (scope *Scope) selectSQL() string {
+       if len(scope.Search.selects) == 0 {
+               if len(scope.Search.joinConditions) > 0 {
+                       return fmt.Sprintf("%v.*", scope.QuotedTableName())
+               }
+               return "*"
+       }
+       return scope.buildSelectQuery(scope.Search.selects)
+}
+
+// orderSQL renders the ORDER BY clause, or "" when ordering is absent/ignored.
+func (scope *Scope) orderSQL() string {
+	if scope.Search.ignoreOrderQuery || len(scope.Search.orders) == 0 {
+		return ""
+	}
+
+	var orders []string
+	for _, order := range scope.Search.orders {
+		switch o := order.(type) {
+		case string:
+			orders = append(orders, scope.quoteIfPossible(o))
+		case *expr:
+			// Raw expression: substitute one bind var per "?", left to right.
+			sql := o.expr
+			for _, arg := range o.args {
+				sql = strings.Replace(sql, "?", scope.AddToVars(arg), 1)
+			}
+			orders = append(orders, sql)
+		}
+	}
+	return " ORDER BY " + strings.Join(orders, ",")
+}
+
+func (scope *Scope) limitAndOffsetSQL() string {
+       return scope.Dialect().LimitAndOffsetSQL(scope.Search.limit, scope.Search.offset)
+}
+
+func (scope *Scope) groupSQL() string {
+       if len(scope.Search.group) == 0 {
+               return ""
+       }
+       return " GROUP BY " + scope.Search.group
+}
+
+// havingSQL renders the HAVING clause from the registered having conditions.
+func (scope *Scope) havingSQL() string {
+	var conditions []string
+	for _, clause := range scope.Search.havingConditions {
+		if condition := scope.buildCondition(clause, true); condition != "" {
+			conditions = append(conditions, condition)
+		}
+	}
+	if len(conditions) == 0 {
+		return ""
+	}
+	return " HAVING " + strings.Join(conditions, " AND ")
+}
+
+// joinsSQL renders the JOIN fragments registered on the search.
+func (scope *Scope) joinsSQL() string {
+	var joins []string
+	for _, clause := range scope.Search.joinConditions {
+		condition := scope.buildCondition(clause, true)
+		if condition == "" {
+			continue
+		}
+		// Strip the wrapping parentheses added by buildCondition.
+		joins = append(joins, strings.TrimSuffix(strings.TrimPrefix(condition, "("), ")"))
+	}
+	return strings.Join(joins, " ") + " "
+}
+
+func (scope *Scope) prepareQuerySQL() {
+       if scope.Search.raw {
+               scope.Raw(scope.CombinedConditionSql())
+       } else {
+               scope.Raw(fmt.Sprintf("SELECT %v FROM %v %v", scope.selectSQL(), scope.QuotedTableName(), scope.CombinedConditionSql()))
+       }
+       return
+}
+
+func (scope *Scope) inlineCondition(values ...interface{}) *Scope {
+       if len(values) > 0 {
+               scope.Search.Where(values[0], values[1:]...)
+       }
+       return scope
+}
+
+// callCallbacks runs the given callback chain against the scope.
+func (scope *Scope) callCallbacks(funcs []*func(s *Scope)) *Scope {
+	// If any callback panics while a transaction is open, roll it back before
+	// re-raising the panic.
+	defer func() {
+		if err := recover(); err != nil {
+			if db, ok := scope.db.db.(sqlTx); ok {
+				db.Rollback()
+			}
+			panic(err)
+		}
+	}()
+	for _, f := range funcs {
+		(*f)(scope)
+		// A callback may set skipLeft to abort the remaining chain.
+		if scope.skipLeft {
+			break
+		}
+	}
+	return scope
+}
+
+// convertInterfaceToMap normalizes values (a map, a slice of values, or a
+// struct) into an attribute map keyed by column name.
+func convertInterfaceToMap(values interface{}, withIgnoredField bool) map[string]interface{} {
+	var attrs = map[string]interface{}{}
+
+	switch value := values.(type) {
+	case map[string]interface{}:
+		return value
+	case []interface{}:
+		// Merge each element's attributes recursively; later entries win.
+		for _, v := range value {
+			for key, value := range convertInterfaceToMap(v, withIgnoredField) {
+				attrs[key] = value
+			}
+		}
+	case interface{}:
+		reflectValue := reflect.ValueOf(values)
+
+		switch reflectValue.Kind() {
+		case reflect.Map:
+			// Non-string-keyed maps are not supported; keys are asserted to string.
+			for _, key := range reflectValue.MapKeys() {
+				attrs[ToColumnName(key.Interface().(string))] = reflectValue.MapIndex(key).Interface()
+			}
+		default:
+			// Treat anything else as a struct and pull its non-blank fields.
+			for _, field := range (&Scope{Value: values}).Fields() {
+				if !field.IsBlank && (withIgnoredField || !field.IsIgnored) {
+					attrs[field.DBName] = field.Field.Interface()
+				}
+			}
+		}
+	}
+	return attrs
+}
+
+// updatedAttrsWithValues merges value (map or struct) into the scope's struct
+// and reports which attributes actually changed.
+func (scope *Scope) updatedAttrsWithValues(value interface{}) (results map[string]interface{}, hasUpdate bool) {
+	if scope.IndirectValue().Kind() != reflect.Struct {
+		// Not a struct destination: nothing to diff against, everything counts.
+		return convertInterfaceToMap(value, false), true
+	}
+
+	results = map[string]interface{}{}
+
+	for key, value := range convertInterfaceToMap(value, true) {
+		if field, ok := scope.FieldByName(key); ok && scope.changeableField(field) {
+			if _, ok := value.(*expr); ok {
+				// Raw SQL expressions cannot be assigned to the field; pass through.
+				hasUpdate = true
+				results[field.DBName] = value
+			} else {
+				err := field.Set(value)
+				if field.IsNormal && !field.IsIgnored {
+					hasUpdate = true
+					if err == ErrUnaddressable {
+						// Unaddressable field: keep the raw value for the SQL update.
+						results[field.DBName] = value
+					} else {
+						results[field.DBName] = field.Field.Interface()
+					}
+				}
+			}
+		}
+	}
+	return
+}
+
+func (scope *Scope) row() *sql.Row {
+       defer scope.trace(NowFunc())
+
+       result := &RowQueryResult{}
+       scope.InstanceSet("row_query_result", result)
+       scope.callCallbacks(scope.db.parent.callbacks.rowQueries)
+
+       return result.Row
+}
+
+func (scope *Scope) rows() (*sql.Rows, error) {
+       defer scope.trace(NowFunc())
+
+       result := &RowsQueryResult{}
+       scope.InstanceSet("row_query_result", result)
+       scope.callCallbacks(scope.db.parent.callbacks.rowQueries)
+
+       return result.Rows, result.Error
+}
+
+func (scope *Scope) initialize() *Scope {
+       for _, clause := range scope.Search.whereConditions {
+               scope.updatedAttrsWithValues(clause["query"])
+       }
+       scope.updatedAttrsWithValues(scope.Search.initAttrs)
+       scope.updatedAttrsWithValues(scope.Search.assignAttrs)
+       return scope
+}
+
+func (scope *Scope) isQueryForColumn(query interface{}, column string) bool {
+       queryStr := strings.ToLower(fmt.Sprint(query))
+       if queryStr == column {
+               return true
+       }
+
+       if strings.HasSuffix(queryStr, "as "+column) {
+               return true
+       }
+
+       if strings.HasSuffix(queryStr, "as "+scope.Quote(column)) {
+               return true
+       }
+
+       return false
+}
+
+// pluck queries a single column into value, which must point at a slice.
+func (scope *Scope) pluck(column string, value interface{}) *Scope {
+	dest := reflect.Indirect(reflect.ValueOf(value))
+	if dest.Kind() != reflect.Slice {
+		scope.Err(fmt.Errorf("results should be a slice, not %s", dest.Kind()))
+		return scope
+	}
+
+	// Select the column unless the current select already targets it.
+	if query, ok := scope.Search.selects["query"]; !ok || !scope.isQueryForColumn(query, column) {
+		scope.Search.Select(column)
+	}
+
+	rows, err := scope.rows()
+	if scope.Err(err) == nil {
+		defer rows.Close()
+		for rows.Next() {
+			// Scan each row into a new element of the destination's element type.
+			elem := reflect.New(dest.Type().Elem()).Interface()
+			scope.Err(rows.Scan(elem))
+			dest.Set(reflect.Append(dest, reflect.ValueOf(elem).Elem()))
+		}
+
+		// Surface any iteration error after the loop.
+		if err := rows.Err(); err != nil {
+			scope.Err(err)
+		}
+	}
+	return scope
+}
+
+// count runs a COUNT query for the current scope and scans it into value.
+func (scope *Scope) count(value interface{}) *Scope {
+	// Only inject a count(*) select when the query is not already a count.
+	if query, ok := scope.Search.selects["query"]; !ok || !countingQueryRegexp.MatchString(fmt.Sprint(query)) {
+		if len(scope.Search.group) != 0 {
+			// With GROUP BY, count the grouped rows via a wrapping subquery.
+			scope.Search.Select("count(*) FROM ( SELECT count(*) as name ")
+			scope.Search.group += " ) AS count_table"
+		} else {
+			scope.Search.Select("count(*)")
+		}
+	}
+	// ORDER BY is irrelevant for a count and can break some databases.
+	scope.Search.ignoreOrderQuery = true
+	scope.Err(scope.row().Scan(value))
+	return scope
+}
+
+func (scope *Scope) typeName() string {
+       typ := scope.IndirectValue().Type()
+
+       for typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr {
+               typ = typ.Elem()
+       }
+
+       return typ.Name()
+}
+
+// trace print sql log
+func (scope *Scope) trace(t time.Time) {
+       if len(scope.SQL) > 0 {
+               scope.db.slog(scope.SQL, t, scope.SQLVars...)
+       }
+}
+
+// changeableField reports whether field may be written by the current
+// operation. With an explicit Select list, only the selected fields (matched
+// by struct name or DB column name) are changeable; otherwise every field
+// not listed in Omit is changeable.
+func (scope *Scope) changeableField(field *Field) bool {
+       if selectAttrs := scope.SelectAttrs(); len(selectAttrs) > 0 {
+               for _, attr := range selectAttrs {
+                       if field.Name == attr || field.DBName == attr {
+                               return true
+                       }
+               }
+               return false
+       }
+
+       for _, attr := range scope.OmitAttrs() {
+               if field.Name == attr || field.DBName == attr {
+                       return false
+               }
+       }
+
+       return true
+}
+
+// related loads records associated with the scope's value into value,
+// resolving the association either from the given foreignKeys or from the
+// conventional "<TypeName>Id" field names. Each relationship kind
+// (many_to_many, belongs_to, has_many/has_one) builds its own query; when
+// no relationship metadata exists, a plain primary-key lookup is used.
+// An error is recorded on the scope if no usable association is found.
+func (scope *Scope) related(value interface{}, foreignKeys ...string) *Scope {
+       toScope := scope.db.NewScope(value)
+       tx := scope.db.Set("gorm:association:source", scope.Value)
+
+       // Try explicit foreign keys first, then conventional "<Type>Id" names.
+       for _, foreignKey := range append(foreignKeys, toScope.typeName()+"Id", scope.typeName()+"Id") {
+               fromField, _ := scope.FieldByName(foreignKey)
+               toField, _ := toScope.FieldByName(foreignKey)
+
+               if fromField != nil {
+                       if relationship := fromField.Relationship; relationship != nil {
+                               if relationship.Kind == "many_to_many" {
+                                       // Delegate to the join-table handler to build the query.
+                                       joinTableHandler := relationship.JoinTableHandler
+                                       scope.Err(joinTableHandler.JoinWith(joinTableHandler, tx, scope.Value).Find(value).Error)
+                               } else if relationship.Kind == "belongs_to" {
+                                       // Match the association's key columns against this record's FK values.
+                                       for idx, foreignKey := range relationship.ForeignDBNames {
+                                               if field, ok := scope.FieldByName(foreignKey); ok {
+                                                       tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.AssociationForeignDBNames[idx])), field.Field.Interface())
+                                               }
+                                       }
+                                       scope.Err(tx.Find(value).Error)
+                               } else if relationship.Kind == "has_many" || relationship.Kind == "has_one" {
+                                       // Match the related table's FK columns against this record's key values.
+                                       for idx, foreignKey := range relationship.ForeignDBNames {
+                                               if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok {
+                                                       tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
+                                               }
+                                       }
+
+                                       if relationship.PolymorphicType != "" {
+                                               tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue)
+                                       }
+                                       scope.Err(tx.Find(value).Error)
+                               }
+                       } else {
+                               // No relationship metadata: look up by the target's primary key.
+                               sql := fmt.Sprintf("%v = ?", scope.Quote(toScope.PrimaryKey()))
+                               scope.Err(tx.Where(sql, fromField.Field.Interface()).Find(value).Error)
+                       }
+                       return scope
+               } else if toField != nil {
+                       // FK lives on the target side: filter it by this record's primary key.
+                       sql := fmt.Sprintf("%v = ?", scope.Quote(toField.DBName))
+                       scope.Err(tx.Where(sql, scope.PrimaryKeyValue()).Find(value).Error)
+                       return scope
+               }
+       }
+
+       scope.Err(fmt.Errorf("invalid association %v", foreignKeys))
+       return scope
+}
+
+// getTableOptions returns the "gorm:table_options" setting (prefixed with a
+// space, ready to append to DDL) or an empty string if it is not set.
+func (scope *Scope) getTableOptions() string {
+       tableOptions, ok := scope.Get("gorm:table_options")
+       if !ok {
+               return ""
+       }
+       return " " + tableOptions.(string)
+}
+
+// createJoinTable creates the join table for a many-to-many relationship
+// field if it does not already exist, deriving its columns from the foreign
+// key fields on both sides of the association. All join columns form a
+// composite primary key; the handler is then auto-migrated on the table.
+func (scope *Scope) createJoinTable(field *StructField) {
+       if relationship := field.Relationship; relationship != nil && relationship.JoinTableHandler != nil {
+               joinTableHandler := relationship.JoinTableHandler
+               joinTable := joinTableHandler.Table(scope.db)
+               if !scope.Dialect().HasTable(joinTable) {
+                       toScope := &Scope{Value: reflect.New(field.Struct.Type).Interface()}
+
+                       var sqlTypes, primaryKeys []string
+                       // Columns referencing this side of the association.
+                       for idx, fieldName := range relationship.ForeignFieldNames {
+                               if field, ok := scope.FieldByName(fieldName); ok {
+                                       // Clone so the source model's field definition is not mutated.
+                                       foreignKeyStruct := field.clone()
+                                       foreignKeyStruct.IsPrimaryKey = false
+                                       foreignKeyStruct.TagSettingsSet("IS_JOINTABLE_FOREIGNKEY", "true")
+                                       foreignKeyStruct.TagSettingsDelete("AUTO_INCREMENT")
+                                       sqlTypes = append(sqlTypes, scope.Quote(relationship.ForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct))
+                                       primaryKeys = append(primaryKeys, scope.Quote(relationship.ForeignDBNames[idx]))
+                               }
+                       }
+
+                       // Columns referencing the associated model's side.
+                       for idx, fieldName := range relationship.AssociationForeignFieldNames {
+                               if field, ok := toScope.FieldByName(fieldName); ok {
+                                       foreignKeyStruct := field.clone()
+                                       foreignKeyStruct.IsPrimaryKey = false
+                                       foreignKeyStruct.TagSettingsSet("IS_JOINTABLE_FOREIGNKEY", "true")
+                                       foreignKeyStruct.TagSettingsDelete("AUTO_INCREMENT")
+                                       sqlTypes = append(sqlTypes, scope.Quote(relationship.AssociationForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct))
+                                       primaryKeys = append(primaryKeys, scope.Quote(relationship.AssociationForeignDBNames[idx]))
+                               }
+                       }
+
+                       scope.Err(scope.NewDB().Exec(fmt.Sprintf("CREATE TABLE %v (%v, PRIMARY KEY (%v))%s", scope.Quote(joinTable), strings.Join(sqlTypes, ","), strings.Join(primaryKeys, ","), scope.getTableOptions())).Error)
+               }
+               scope.NewDB().Table(joinTable).AutoMigrate(joinTableHandler)
+       }
+}
+
+// createTable emits and executes a CREATE TABLE statement for the scope's
+// model, including column definitions, a primary-key clause (unless the
+// primary key was already declared inside a column type), any join tables,
+// and the model's declared indexes.
+func (scope *Scope) createTable() *Scope {
+       var tags []string
+       var primaryKeys []string
+       var primaryKeyInColumnType = false
+       for _, field := range scope.GetModelStruct().StructFields {
+               if field.IsNormal {
+                       sqlTag := scope.Dialect().DataTypeOf(field)
+
+                       // Check if the primary key constraint was specified as
+                       // part of the column type. If so, we can only support
+                       // one column as the primary key.
+                       if strings.Contains(strings.ToLower(sqlTag), "primary key") {
+                               primaryKeyInColumnType = true
+                       }
+
+                       tags = append(tags, scope.Quote(field.DBName)+" "+sqlTag)
+               }
+
+               if field.IsPrimaryKey {
+                       primaryKeys = append(primaryKeys, scope.Quote(field.DBName))
+               }
+               scope.createJoinTable(field)
+       }
+
+       var primaryKeyStr string
+       if len(primaryKeys) > 0 && !primaryKeyInColumnType {
+               primaryKeyStr = fmt.Sprintf(", PRIMARY KEY (%v)", strings.Join(primaryKeys, ","))
+       }
+
+       scope.Raw(fmt.Sprintf("CREATE TABLE %v (%v %v)%s", scope.QuotedTableName(), strings.Join(tags, ","), primaryKeyStr, scope.getTableOptions())).Exec()
+
+       scope.autoIndex()
+       return scope
+}
+
+// dropTable executes DROP TABLE for the scope's table, appending any
+// configured table options.
+func (scope *Scope) dropTable() *Scope {
+       scope.Raw(fmt.Sprintf("DROP TABLE %v%s", scope.QuotedTableName(), scope.getTableOptions())).Exec()
+       return scope
+}
+
+// modifyColumn changes column's type to typ via the dialect, recording any
+// error on the DB.
+func (scope *Scope) modifyColumn(column string, typ string) {
+       scope.db.AddError(scope.Dialect().ModifyColumn(scope.QuotedTableName(), scope.Quote(column), typ))
+}
+
+// dropColumn executes ALTER TABLE ... DROP COLUMN for the given column.
+func (scope *Scope) dropColumn(column string) {
+       scope.Raw(fmt.Sprintf("ALTER TABLE %v DROP COLUMN %v", scope.QuotedTableName(), scope.Quote(column))).Exec()
+}
+
+// addIndex creates a (optionally unique) index named indexName over the
+// given columns, appending the scope's WHERE clause (used for partial
+// indexes on dialects that support it). It is a no-op when an index with
+// that name already exists.
+func (scope *Scope) addIndex(unique bool, indexName string, column ...string) {
+       if scope.Dialect().HasIndex(scope.TableName(), indexName) {
+               return
+       }
+
+       var columns []string
+       for _, name := range column {
+               columns = append(columns, scope.quoteIfPossible(name))
+       }
+
+       sqlCreate := "CREATE INDEX"
+       if unique {
+               sqlCreate = "CREATE UNIQUE INDEX"
+       }
+
+       scope.Raw(fmt.Sprintf("%s %v ON %v(%v) %v", sqlCreate, indexName, scope.QuotedTableName(), strings.Join(columns, ", "), scope.whereSQL())).Exec()
+}
+
+// addForeignKey adds a FOREIGN KEY constraint from field to dest (a
+// "table(column)" reference) with the given ON DELETE / ON UPDATE actions.
+// The constraint name is dialect-generated for compatibility with keys
+// created by older versions; existing constraints are left untouched.
+func (scope *Scope) addForeignKey(field string, dest string, onDelete string, onUpdate string) {
+       // Compatible with old generated key
+       keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest, "foreign")
+
+       if scope.Dialect().HasForeignKey(scope.TableName(), keyName) {
+               return
+       }
+       var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE %s ON UPDATE %s;`
+       scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName), scope.quoteIfPossible(field), dest, onDelete, onUpdate)).Exec()
+}
+
+// removeForeignKey drops the foreign key constraint from field to dest, if
+// it exists. MySQL requires DROP FOREIGN KEY; other dialects use the
+// standard DROP CONSTRAINT form.
+func (scope *Scope) removeForeignKey(field string, dest string) {
+       keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest, "foreign")
+       if !scope.Dialect().HasForeignKey(scope.TableName(), keyName) {
+               return
+       }
+       // Zero-value mysql dialect used only to compare dialect names.
+       var mysql mysql
+       var query string
+       if scope.Dialect().GetName() == mysql.GetName() {
+               query = `ALTER TABLE %s DROP FOREIGN KEY %s;`
+       } else {
+               query = `ALTER TABLE %s DROP CONSTRAINT %s;`
+       }
+
+       scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName))).Exec()
+}
+
+// removeIndex drops the named index from the scope's table via the dialect.
+func (scope *Scope) removeIndex(indexName string) {
+       scope.Dialect().RemoveIndex(scope.TableName(), indexName)
+}
+
+// autoMigrate creates the model's table if missing; otherwise it adds any
+// columns that do not yet exist, (re)creates join tables, and ensures
+// declared indexes. Existing columns are never altered or dropped.
+func (scope *Scope) autoMigrate() *Scope {
+       tableName := scope.TableName()
+       quotedTableName := scope.QuotedTableName()
+
+       if !scope.Dialect().HasTable(tableName) {
+               scope.createTable()
+       } else {
+               for _, field := range scope.GetModelStruct().StructFields {
+                       if !scope.Dialect().HasColumn(tableName, field.DBName) {
+                               if field.IsNormal {
+                                       sqlTag := scope.Dialect().DataTypeOf(field)
+                                       scope.Raw(fmt.Sprintf("ALTER TABLE %v ADD %v %v;", quotedTableName, scope.Quote(field.DBName), sqlTag)).Exec()
+                               }
+                       }
+                       scope.createJoinTable(field)
+               }
+               scope.autoIndex()
+       }
+       return scope
+}
+
+// autoIndex creates the indexes declared via INDEX / UNIQUE_INDEX struct
+// tags. Fields sharing the same index name are grouped into one composite
+// index; unnamed tags get a dialect-generated "idx_"/"uix_" name.
+func (scope *Scope) autoIndex() *Scope {
+       var indexes = map[string][]string{}
+       var uniqueIndexes = map[string][]string{}
+
+       for _, field := range scope.GetStructFields() {
+               if name, ok := field.TagSettingsGet("INDEX"); ok {
+                       // A field may belong to several comma-separated indexes.
+                       names := strings.Split(name, ",")
+
+                       for _, name := range names {
+                               if name == "INDEX" || name == "" {
+                                       name = scope.Dialect().BuildKeyName("idx", scope.TableName(), field.DBName)
+                               }
+                               indexes[name] = append(indexes[name], field.DBName)
+                       }
+               }
+
+               if name, ok := field.TagSettingsGet("UNIQUE_INDEX"); ok {
+                       names := strings.Split(name, ",")
+
+                       for _, name := range names {
+                               if name == "UNIQUE_INDEX" || name == "" {
+                                       name = scope.Dialect().BuildKeyName("uix", scope.TableName(), field.DBName)
+                               }
+                               uniqueIndexes[name] = append(uniqueIndexes[name], field.DBName)
+                       }
+               }
+       }
+
+       for name, columns := range indexes {
+               if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddIndex(name, columns...); db.Error != nil {
+                       scope.db.AddError(db.Error)
+               }
+       }
+
+       for name, columns := range uniqueIndexes {
+               if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddUniqueIndex(name, columns...); db.Error != nil {
+                       scope.db.AddError(db.Error)
+               }
+       }
+
+       return scope
+}
+
+// getColumnAsArray extracts the named struct fields from each value (structs
+// or slices of structs) into rows of []interface{}. Rows whose fields are
+// all blank (zero-valued) are skipped.
+func (scope *Scope) getColumnAsArray(columns []string, values ...interface{}) (results [][]interface{}) {
+       for _, value := range values {
+               indirectValue := indirect(reflect.ValueOf(value))
+
+               switch indirectValue.Kind() {
+               case reflect.Slice:
+                       for i := 0; i < indirectValue.Len(); i++ {
+                               var result []interface{}
+                               var object = indirect(indirectValue.Index(i))
+                               // hasValue latches once any column is non-blank.
+                               var hasValue = false
+                               for _, column := range columns {
+                                       field := object.FieldByName(column)
+                                       if hasValue || !isBlank(field) {
+                                               hasValue = true
+                                       }
+                                       result = append(result, field.Interface())
+                               }
+
+                               if hasValue {
+                                       results = append(results, result)
+                               }
+                       }
+               case reflect.Struct:
+                       var result []interface{}
+                       var hasValue = false
+                       for _, column := range columns {
+                               field := indirectValue.FieldByName(column)
+                               if hasValue || !isBlank(field) {
+                                       hasValue = true
+                               }
+                               result = append(result, field.Interface())
+                       }
+
+                       if hasValue {
+                               results = append(results, result)
+                       }
+               }
+       }
+
+       return
+}
+
+// getColumnAsScope collects the values of the named column/field from the
+// scope's value (a struct or a slice of structs) into a new Scope. For
+// slices, nested slice fields are flattened and duplicate addresses are
+// deduplicated. Returns nil when the column cannot be addressed.
+func (scope *Scope) getColumnAsScope(column string) *Scope {
+       indirectScopeValue := scope.IndirectValue()
+
+       switch indirectScopeValue.Kind() {
+       case reflect.Slice:
+               if fieldStruct, ok := scope.GetModelStruct().ModelType.FieldByName(column); ok {
+                       fieldType := fieldStruct.Type
+                       if fieldType.Kind() == reflect.Slice || fieldType.Kind() == reflect.Ptr {
+                               fieldType = fieldType.Elem()
+                       }
+
+                       // Track addresses already appended so shared elements
+                       // are only collected once.
+                       resultsMap := map[interface{}]bool{}
+                       results := reflect.New(reflect.SliceOf(reflect.PtrTo(fieldType))).Elem()
+
+                       for i := 0; i < indirectScopeValue.Len(); i++ {
+                               result := indirect(indirect(indirectScopeValue.Index(i)).FieldByName(column))
+
+                               if result.Kind() == reflect.Slice {
+                                       for j := 0; j < result.Len(); j++ {
+                                               if elem := result.Index(j); elem.CanAddr() && resultsMap[elem.Addr()] != true {
+                                                       resultsMap[elem.Addr()] = true
+                                                       results = reflect.Append(results, elem.Addr())
+                                               }
+                                       }
+                               } else if result.CanAddr() && resultsMap[result.Addr()] != true {
+                                       resultsMap[result.Addr()] = true
+                                       results = reflect.Append(results, result.Addr())
+                               }
+                       }
+                       return scope.New(results.Interface())
+               }
+       case reflect.Struct:
+               if field := indirectScopeValue.FieldByName(column); field.CanAddr() {
+                       return scope.New(field.Addr().Interface())
+               }
+       }
+       return nil
+}
+
+// hasConditions reports whether the scope carries any query conditions:
+// a non-zero primary key, or at least one where/or/not condition.
+func (scope *Scope) hasConditions() bool {
+       return !scope.PrimaryKeyZero() ||
+               len(scope.Search.whereConditions) > 0 ||
+               len(scope.Search.orConditions) > 0 ||
+               len(scope.Search.notConditions) > 0
+}
diff --git a/vendor/github.com/jinzhu/gorm/scope_test.go b/vendor/github.com/jinzhu/gorm/scope_test.go
new file mode 100755 (executable)
index 0000000..3018f35
--- /dev/null
@@ -0,0 +1,80 @@
+package gorm_test
+
+import (
+       "encoding/hex"
+       "math/rand"
+       "strings"
+       "testing"
+
+       "github.com/jinzhu/gorm"
+)
+
+// NameIn1And2 is a reusable scope filtering users named ScopeUser1/ScopeUser2.
+func NameIn1And2(d *gorm.DB) *gorm.DB {
+       return d.Where("name in (?)", []string{"ScopeUser1", "ScopeUser2"})
+}
+
+// NameIn2And3 is a reusable scope filtering users named ScopeUser2/ScopeUser3.
+func NameIn2And3(d *gorm.DB) *gorm.DB {
+       return d.Where("name in (?)", []string{"ScopeUser2", "ScopeUser3"})
+}
+
+// NameIn returns a parameterized scope filtering users by the given names.
+func NameIn(names []string) func(d *gorm.DB) *gorm.DB {
+       return func(d *gorm.DB) *gorm.DB {
+               return d.Where("name in (?)", names)
+       }
+}
+
+// TestScopes verifies that DB.Scopes applies a single scope, intersects
+// multiple chained scopes, and accepts parameterized scope factories.
+func TestScopes(t *testing.T) {
+       user1 := User{Name: "ScopeUser1", Age: 1}
+       user2 := User{Name: "ScopeUser2", Age: 1}
+       user3 := User{Name: "ScopeUser3", Age: 2}
+       DB.Save(&user1).Save(&user2).Save(&user3)
+
+       var users1, users2, users3 []User
+       DB.Scopes(NameIn1And2).Find(&users1)
+       if len(users1) != 2 {
+               t.Errorf("Should found two users's name in 1, 2")
+       }
+
+       // Two scopes combine with AND, leaving only ScopeUser2.
+       DB.Scopes(NameIn1And2, NameIn2And3).Find(&users2)
+       if len(users2) != 1 {
+               t.Errorf("Should found one user's name is 2")
+       }
+
+       DB.Scopes(NameIn([]string{user1.Name, user3.Name})).Find(&users3)
+       if len(users3) != 2 {
+               t.Errorf("Should found two users's name in 1, 3")
+       }
+}
+
+// randName returns a pseudo-random hex-suffixed name ("n-…") so each test
+// run inserts distinct user rows.
+func randName() string {
+       data := make([]byte, 8)
+       rand.Read(data)
+
+       return "n-" + hex.EncodeToString(data)
+}
+
+// TestValuer checks that fields implementing driver.Valuer (EncryptedData,
+// PasswordHash) round-trip through Save and can be used in Where clauses.
+func TestValuer(t *testing.T) {
+       name := randName()
+
+       origUser := User{Name: name, Age: 1, Password: EncryptedData("pass1"), PasswordHash: []byte("abc")}
+       if err := DB.Save(&origUser).Error; err != nil {
+               t.Errorf("No error should happen when saving user, but got %v", err)
+       }
+
+       var user2 User
+       if err := DB.Where("name = ? AND password = ? AND password_hash = ?", name, EncryptedData("pass1"), []byte("abc")).First(&user2).Error; err != nil {
+               t.Errorf("No error should happen when querying user with valuer, but got %v", err)
+       }
+}
+
+// TestFailedValuer checks that an error returned by a Valuer implementation
+// (EncryptedData values starting with "x") is propagated to the caller.
+func TestFailedValuer(t *testing.T) {
+       name := randName()
+
+       err := DB.Exec("INSERT INTO users(name, password) VALUES(?, ?)", name, EncryptedData("xpass1")).Error
+
+       if err == nil {
+               t.Errorf("There should be an error should happen when insert data")
+       } else if !strings.HasPrefix(err.Error(), "Should not start with") {
+               t.Errorf("The error should be returned from Valuer, but get %v", err)
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/search.go b/vendor/github.com/jinzhu/gorm/search.go
new file mode 100755 (executable)
index 0000000..9013859
--- /dev/null
@@ -0,0 +1,153 @@
+package gorm
+
+import (
+       "fmt"
+)
+
+// search accumulates the query-building state for a DB chain: condition
+// lists, select/omit columns, ordering, pagination, grouping, preloads and
+// raw/unscoped flags. Conditions are stored as {"query": ..., "args": ...}
+// maps and rendered to SQL later by the scope.
+type search struct {
+       db               *DB
+       whereConditions  []map[string]interface{}
+       orConditions     []map[string]interface{}
+       notConditions    []map[string]interface{}
+       havingConditions []map[string]interface{}
+       joinConditions   []map[string]interface{}
+       initAttrs        []interface{}
+       assignAttrs      []interface{}
+       selects          map[string]interface{}
+       omits            []string
+       orders           []interface{}
+       preload          []searchPreload
+       offset           interface{}
+       limit            interface{}
+       group            string
+       tableName        string
+       raw              bool
+       Unscoped         bool
+       ignoreOrderQuery bool
+}
+
+// searchPreload records one Preload request: the association path (schema)
+// and its optional query conditions.
+type searchPreload struct {
+       schema     string
+       conditions []interface{}
+}
+
+// clone returns a shallow copy of the search; condition slices share
+// backing arrays until appended to.
+func (s *search) clone() *search {
+       clone := *s
+       return &clone
+}
+
+// Where appends an AND condition with its bind arguments.
+func (s *search) Where(query interface{}, values ...interface{}) *search {
+       s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values})
+       return s
+}
+
+// Not appends a negated condition with its bind arguments.
+func (s *search) Not(query interface{}, values ...interface{}) *search {
+       s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values})
+       return s
+}
+
+// Or appends an OR condition with its bind arguments.
+func (s *search) Or(query interface{}, values ...interface{}) *search {
+       s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values})
+       return s
+}
+
+// Attrs records attributes to initialize a record with (used by FirstOrInit
+// / FirstOrCreate when no record is found).
+func (s *search) Attrs(attrs ...interface{}) *search {
+       s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...))
+       return s
+}
+
+// Assign records attributes to assign to the record regardless of whether
+// it was found.
+func (s *search) Assign(attrs ...interface{}) *search {
+       s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...))
+       return s
+}
+
+// Order appends an ORDER BY expression; reorder=true discards any previous
+// ordering first. Nil or empty values are ignored.
+func (s *search) Order(value interface{}, reorder ...bool) *search {
+       if len(reorder) > 0 && reorder[0] {
+               s.orders = []interface{}{}
+       }
+
+       if value != nil && value != "" {
+               s.orders = append(s.orders, value)
+       }
+       return s
+}
+
+// Select replaces (not appends) the select clause and its arguments.
+func (s *search) Select(query interface{}, args ...interface{}) *search {
+       s.selects = map[string]interface{}{"query": query, "args": args}
+       return s
+}
+
+// Omit replaces the list of columns excluded from writes.
+func (s *search) Omit(columns ...string) *search {
+       s.omits = columns
+       return s
+}
+
+// Limit sets the LIMIT value (interface{} so -1 can mean "no limit").
+func (s *search) Limit(limit interface{}) *search {
+       s.limit = limit
+       return s
+}
+
+// Offset sets the OFFSET value.
+func (s *search) Offset(offset interface{}) *search {
+       s.offset = offset
+       return s
+}
+
+// Group sets the GROUP BY clause; non-string/int input records an error.
+func (s *search) Group(query string) *search {
+       s.group = s.getInterfaceAsSQL(query)
+       return s
+}
+
+// Having appends a HAVING condition; gorm.Expr values contribute their own
+// SQL fragment and arguments.
+func (s *search) Having(query interface{}, values ...interface{}) *search {
+       if val, ok := query.(*expr); ok {
+               s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": val.expr, "args": val.args})
+       } else {
+               s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": query, "args": values})
+       }
+       return s
+}
+
+// Joins appends a JOIN clause with its bind arguments.
+func (s *search) Joins(query string, values ...interface{}) *search {
+       s.joinConditions = append(s.joinConditions, map[string]interface{}{"query": query, "args": values})
+       return s
+}
+
+// Preload registers an association path to eager-load; a repeated call for
+// the same schema replaces the earlier conditions.
+func (s *search) Preload(schema string, values ...interface{}) *search {
+       var preloads []searchPreload
+       for _, preload := range s.preload {
+               if preload.schema != schema {
+                       preloads = append(preloads, preload)
+               }
+       }
+       preloads = append(preloads, searchPreload{schema, values})
+       s.preload = preloads
+       return s
+}
+
+// Raw toggles raw-SQL mode for the search.
+func (s *search) Raw(b bool) *search {
+       s.raw = b
+       return s
+}
+
+// unscoped disables the default soft-delete/scope filtering.
+func (s *search) unscoped() *search {
+       s.Unscoped = true
+       return s
+}
+
+// Table overrides the table name used by the search.
+func (s *search) Table(name string) *search {
+       s.tableName = name
+       return s
+}
+
+// getInterfaceAsSQL renders a string or integer value as a SQL fragment.
+// Any other type records ErrInvalidSQL on the DB and yields "". The value
+// "-1" is a sentinel meaning "unset" and is also rendered as "".
+func (s *search) getInterfaceAsSQL(value interface{}) (str string) {
+       switch value.(type) {
+       case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+               str = fmt.Sprintf("%v", value)
+       default:
+               s.db.AddError(ErrInvalidSQL)
+       }
+
+       if str == "-1" {
+               return ""
+       }
+       return
+}
diff --git a/vendor/github.com/jinzhu/gorm/search_test.go b/vendor/github.com/jinzhu/gorm/search_test.go
new file mode 100755 (executable)
index 0000000..4db7ab6
--- /dev/null
@@ -0,0 +1,30 @@
+package gorm
+
+import (
+       "reflect"
+       "testing"
+)
+
+// TestCloneSearch verifies that search.clone produces an independent copy:
+// mutating the clone's conditions, orders, attrs and selects must not leak
+// back into the original.
+func TestCloneSearch(t *testing.T) {
+       s := new(search)
+       s.Where("name = ?", "jinzhu").Order("name").Attrs("name", "jinzhu").Select("name, age")
+
+       s1 := s.clone()
+       s1.Where("age = ?", 20).Order("age").Attrs("email", "a@e.org").Select("email")
+
+       if reflect.DeepEqual(s.whereConditions, s1.whereConditions) {
+               t.Errorf("Where should be copied")
+       }
+
+       if reflect.DeepEqual(s.orders, s1.orders) {
+               t.Errorf("Order should be copied")
+       }
+
+       if reflect.DeepEqual(s.initAttrs, s1.initAttrs) {
+               t.Errorf("InitAttrs should be copied")
+       }
+
+       // NOTE(review): this compares the Select *method* values, not the
+       // selects field — likely intended to be s.selects vs s1.selects;
+       // kept as-is since this is vendored upstream code.
+       if reflect.DeepEqual(s.Select, s1.Select) {
+               t.Errorf("selectStr should be copied")
+       }
+}
diff --git a/vendor/github.com/jinzhu/gorm/test_all.sh b/vendor/github.com/jinzhu/gorm/test_all.sh
new file mode 100755 (executable)
index 0000000..5cfb332
--- /dev/null
@@ -0,0 +1,5 @@
+# Run the gorm test suite once per supported database dialect.
+dialects=("postgres" "mysql" "mssql" "sqlite")
+
+for dialect in "${dialects[@]}" ; do
+    # DEBUG=false silences SQL logging; GORM_DIALECT selects the backend.
+    DEBUG=false GORM_DIALECT=${dialect} go test
+done
diff --git a/vendor/github.com/jinzhu/gorm/update_test.go b/vendor/github.com/jinzhu/gorm/update_test.go
new file mode 100755 (executable)
index 0000000..85d53e5
--- /dev/null
@@ -0,0 +1,465 @@
+package gorm_test
+
+import (
+       "testing"
+       "time"
+
+       "github.com/jinzhu/gorm"
+)
+
+func TestUpdate(t *testing.T) {
+       product1 := Product{Code: "product1code"}
+       product2 := Product{Code: "product2code"}
+
+       DB.Save(&product1).Save(&product2).Update("code", "product2newcode")
+
+       if product2.Code != "product2newcode" {
+               t.Errorf("Record should be updated")
+       }
+
+       DB.First(&product1, product1.Id)
+       DB.First(&product2, product2.Id)
+       updatedAt1 := product1.UpdatedAt
+
+       if DB.First(&Product{}, "code = ?", product1.Code).RecordNotFound() {
+               t.Errorf("Product1 should not be updated")
+       }
+
+       if !DB.First(&Product{}, "code = ?", "product2code").RecordNotFound() {
+               t.Errorf("Product2's code should be updated")
+       }
+
+       if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() {
+               t.Errorf("Product2's code should be updated")
+       }
+
+       DB.Table("products").Where("code in (?)", []string{"product1code"}).Update("code", "product1newcode")
+
+       var product4 Product
+       DB.First(&product4, product1.Id)
+       if updatedAt1.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
+               t.Errorf("updatedAt should be updated if something changed")
+       }
+
+       if !DB.First(&Product{}, "code = 'product1code'").RecordNotFound() {
+               t.Errorf("Product1's code should be updated")
+       }
+
+       if DB.First(&Product{}, "code = 'product1newcode'").RecordNotFound() {
+               t.Errorf("Product should not be changed to 789")
+       }
+
+       if DB.Model(product2).Update("CreatedAt", time.Now().Add(time.Hour)).Error != nil {
+               t.Error("No error should raise when update with CamelCase")
+       }
+
+       if DB.Model(&product2).UpdateColumn("CreatedAt", time.Now().Add(time.Hour)).Error != nil {
+               t.Error("No error should raise when update_column with CamelCase")
+       }
+
+       var products []Product
+       DB.Find(&products)
+       if count := DB.Model(Product{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(products)) {
+               t.Error("RowsAffected should be correct when do batch update")
+       }
+
+       DB.First(&product4, product4.Id)
+       updatedAt4 := product4.UpdatedAt
+       DB.Model(&product4).Update("price", gorm.Expr("price + ? - ?", 100, 50))
+       var product5 Product
+       DB.First(&product5, product4.Id)
+       if product5.Price != product4.Price+100-50 {
+               t.Errorf("Update with expression")
+       }
+       if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) {
+               t.Errorf("Update with expression should update UpdatedAt")
+       }
+}
+
+func TestUpdateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) {
+       animal := Animal{Name: "Ferdinand"}
+       DB.Save(&animal)
+       updatedAt1 := animal.UpdatedAt
+
+       DB.Save(&animal).Update("name", "Francis")
+
+       if updatedAt1.Format(time.RFC3339Nano) == animal.UpdatedAt.Format(time.RFC3339Nano) {
+               t.Errorf("updatedAt should not be updated if nothing changed")
+       }
+
+       var animals []Animal
+       DB.Find(&animals)
+       if count := DB.Model(Animal{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(animals)) {
+               t.Error("RowsAffected should be correct when do batch update")
+       }
+
+       animal = Animal{From: "somewhere"}              // No name fields, should be filled with the default value (galeone)
+       DB.Save(&animal).Update("From", "a nice place") // The name field shoul be untouched
+       DB.First(&animal, animal.Counter)
+       if animal.Name != "galeone" {
+               t.Errorf("Name fields shouldn't be changed if untouched, but got %v", animal.Name)
+       }
+
+       // When changing a field with a default value, the change must occur
+       animal.Name = "amazing horse"
+       DB.Save(&animal)
+       DB.First(&animal, animal.Counter)
+       if animal.Name != "amazing horse" {
+               t.Errorf("Update a filed with a default value should occur. But got %v\n", animal.Name)
+       }
+
+       // When changing a field with a default value with blank value
+       animal.Name = ""
+       DB.Save(&animal)
+       DB.First(&animal, animal.Counter)
+       if animal.Name != "" {
+               t.Errorf("Update a filed to blank with a default value should occur. But got %v\n", animal.Name)
+       }
+}
+
// TestUpdates covers multi-column updates via Updates() with maps,
// structs, and SQL expressions, including Table()-based batch updates.
func TestUpdates(t *testing.T) {
	product1 := Product{Code: "product1code", Price: 10}
	product2 := Product{Code: "product2code", Price: 10}
	DB.Save(&product1).Save(&product2)
	// Map form of Updates: both columns are written back to the struct.
	DB.Model(&product1).Updates(map[string]interface{}{"code": "product1newcode", "price": 100})
	if product1.Code != "product1newcode" || product1.Price != 100 {
		t.Errorf("Record should be updated also with map")
	}

	DB.First(&product1, product1.Id)
	DB.First(&product2, product2.Id)
	updatedAt2 := product2.UpdatedAt

	if DB.First(&Product{}, "code = ? and price = ?", product2.Code, product2.Price).RecordNotFound() {
		t.Errorf("Product2 should not be updated")
	}

	if DB.First(&Product{}, "code = ?", "product1newcode").RecordNotFound() {
		t.Errorf("Product1 should be updated")
	}

	// Struct form of Updates through a raw Table() scope.
	DB.Table("products").Where("code in (?)", []string{"product2code"}).Updates(Product{Code: "product2newcode"})
	if !DB.First(&Product{}, "code = 'product2code'").RecordNotFound() {
		t.Errorf("Product2's code should be updated")
	}

	var product4 Product
	DB.First(&product4, product2.Id)
	// NOTE(review): this errors when product2's UpdatedAt *changed* after
	// the Table()-based Updates above, i.e. it asserts UpdatedAt stays
	// untouched, but the message reads as the opposite — confirm intent.
	if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
		t.Errorf("updatedAt should be updated if something changed")
	}

	if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() {
		t.Errorf("product2's code should be updated")
	}

	// Expression value inside the Updates map.
	updatedAt4 := product4.UpdatedAt
	DB.Model(&product4).Updates(map[string]interface{}{"price": gorm.Expr("price + ?", 100)})
	var product5 Product
	DB.First(&product5, product4.Id)
	if product5.Price != product4.Price+100 {
		t.Errorf("Updates with expression")
	}
	// product4's UpdatedAt will be reset when updating
	if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) {
		t.Errorf("Updates with expression should update UpdatedAt")
	}
}
+
// TestUpdateColumn checks that UpdateColumn writes columns directly and,
// unlike Update, leaves UpdatedAt untouched.
func TestUpdateColumn(t *testing.T) {
	product1 := Product{Code: "product1code", Price: 10}
	product2 := Product{Code: "product2code", Price: 20}
	// UpdateColumn applies to the most recently chained record (product2).
	DB.Save(&product1).Save(&product2).UpdateColumn(map[string]interface{}{"code": "product2newcode", "price": 100})
	if product2.Code != "product2newcode" || product2.Price != 100 {
		t.Errorf("product 2 should be updated with update column")
	}

	var product3 Product
	DB.First(&product3, product1.Id)
	if product3.Code != "product1code" || product3.Price != 10 {
		t.Errorf("product 1 should not be updated")
	}

	// UpdateColumn must not bump UpdatedAt.
	DB.First(&product2, product2.Id)
	updatedAt2 := product2.UpdatedAt
	DB.Model(product2).UpdateColumn("code", "update_column_new")
	var product4 Product
	DB.First(&product4, product2.Id)
	if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
		t.Errorf("updatedAt should not be updated with update column")
	}

	// UpdateColumn also accepts raw SQL expressions.
	DB.Model(&product4).UpdateColumn("price", gorm.Expr("price + 100 - 50"))
	var product5 Product
	DB.First(&product5, product4.Id)
	if product5.Price != product4.Price+100-50 {
		t.Errorf("UpdateColumn with expression")
	}
	if product5.UpdatedAt.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
		t.Errorf("UpdateColumn with expression should not update UpdatedAt")
	}
}
+
// TestSelectWithUpdate checks that Save combined with Select persists
// only the listed fields and associations; everything else keeps its
// stored value.
func TestSelectWithUpdate(t *testing.T) {
	user := getPreparedUser("select_user", "select_with_update")
	DB.Create(user)

	// Reload the row, then change every field and association.
	var reloadUser User
	DB.First(&reloadUser, user.Id)
	reloadUser.Name = "new_name"
	reloadUser.Age = 50
	reloadUser.BillingAddress = Address{Address1: "New Billing Address"}
	reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"}
	reloadUser.CreditCard = CreditCard{Number: "987654321"}
	reloadUser.Emails = []Email{
		{Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"},
	}
	reloadUser.Company = Company{Name: "new company"}

	// Only the selected fields/associations should be written by Save.
	DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser)

	var queryUser User
	DB.Preload("BillingAddress").Preload("ShippingAddress").
		Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id)

	// Name (selected) must differ from the original; Age (not selected)
	// must be unchanged.
	if queryUser.Name == user.Name || queryUser.Age != user.Age {
		t.Errorf("Should only update users with name column")
	}

	// Selected associations must have changed; ShippingAddress (not
	// selected) must be unchanged.
	if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 ||
		queryUser.ShippingAddressId != user.ShippingAddressId ||
		queryUser.CreditCard.ID == user.CreditCard.ID ||
		len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id {
		t.Errorf("Should only update selected relationships")
	}
}
+
// TestSelectWithUpdateWithMap is the map-based variant of
// TestSelectWithUpdate: Update with a map plus Select should persist
// only the selected keys/associations.
func TestSelectWithUpdateWithMap(t *testing.T) {
	user := getPreparedUser("select_user", "select_with_update_map")
	DB.Create(user)

	updateValues := map[string]interface{}{
		"Name":            "new_name",
		"Age":             50,
		"BillingAddress":  Address{Address1: "New Billing Address"},
		"ShippingAddress": Address{Address1: "New ShippingAddress Address"},
		"CreditCard":      CreditCard{Number: "987654321"},
		"Emails": []Email{
			{Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"},
		},
		"Company": Company{Name: "new company"},
	}

	var reloadUser User
	DB.First(&reloadUser, user.Id)
	// Only the selected map keys should be applied.
	DB.Model(&reloadUser).Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues)

	var queryUser User
	DB.Preload("BillingAddress").Preload("ShippingAddress").
		Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id)

	if queryUser.Name == user.Name || queryUser.Age != user.Age {
		t.Errorf("Should only update users with name column")
	}

	if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 ||
		queryUser.ShippingAddressId != user.ShippingAddressId ||
		queryUser.CreditCard.ID == user.CreditCard.ID ||
		len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id {
		t.Errorf("Should only update selected relationships")
	}
}
+
// TestOmitWithUpdate checks that Save combined with Omit skips the
// listed fields and associations and writes everything else.
func TestOmitWithUpdate(t *testing.T) {
	user := getPreparedUser("omit_user", "omit_with_update")
	DB.Create(user)

	// Reload the row, then change every field and association.
	var reloadUser User
	DB.First(&reloadUser, user.Id)
	reloadUser.Name = "new_name"
	reloadUser.Age = 50
	reloadUser.BillingAddress = Address{Address1: "New Billing Address"}
	reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"}
	reloadUser.CreditCard = CreditCard{Number: "987654321"}
	reloadUser.Emails = []Email{
		{Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"},
	}
	reloadUser.Company = Company{Name: "new company"}

	// Everything except the omitted fields/associations should be saved.
	DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser)

	var queryUser User
	DB.Preload("BillingAddress").Preload("ShippingAddress").
		Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id)

	// Name (omitted) must be unchanged; Age (not omitted) must differ.
	if queryUser.Name != user.Name || queryUser.Age == user.Age {
		t.Errorf("Should only update users with name column")
	}

	// Omitted associations unchanged; ShippingAddress (not omitted) changed.
	if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 ||
		queryUser.ShippingAddressId == user.ShippingAddressId ||
		queryUser.CreditCard.ID != user.CreditCard.ID ||
		len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id {
		t.Errorf("Should only update relationships that not omitted")
	}
}
+
// TestOmitWithUpdateWithMap is the map-based variant of
// TestOmitWithUpdate: Update with a map plus Omit must skip the omitted
// keys/associations and apply the rest.
// NOTE(review): the fixture names ("select_user", "select_with_update_map")
// are reused from the Select variant — possibly intentional, possibly
// copy-paste; confirm against getPreparedUser's semantics.
func TestOmitWithUpdateWithMap(t *testing.T) {
	user := getPreparedUser("select_user", "select_with_update_map")
	DB.Create(user)

	updateValues := map[string]interface{}{
		"Name":            "new_name",
		"Age":             50,
		"BillingAddress":  Address{Address1: "New Billing Address"},
		"ShippingAddress": Address{Address1: "New ShippingAddress Address"},
		"CreditCard":      CreditCard{Number: "987654321"},
		"Emails": []Email{
			{Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"},
		},
		"Company": Company{Name: "new company"},
	}

	var reloadUser User
	DB.First(&reloadUser, user.Id)
	// Omitted map keys must not be applied.
	DB.Model(&reloadUser).Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues)

	var queryUser User
	DB.Preload("BillingAddress").Preload("ShippingAddress").
		Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id)

	if queryUser.Name != user.Name || queryUser.Age == user.Age {
		t.Errorf("Should only update users with name column")
	}

	if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 ||
		queryUser.ShippingAddressId == user.ShippingAddressId ||
		queryUser.CreditCard.ID != user.CreditCard.ID ||
		len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id {
		t.Errorf("Should only update relationships not omitted")
	}
}
+
+func TestSelectWithUpdateColumn(t *testing.T) {
+       user := getPreparedUser("select_user", "select_with_update_map")
+       DB.Create(user)
+
+       updateValues := map[string]interface{}{"Name": "new_name", "Age": 50}
+
+       var reloadUser User
+       DB.First(&reloadUser, user.Id)
+       DB.Model(&reloadUser).Select("Name").UpdateColumn(updateValues)
+
+       var queryUser User
+       DB.First(&queryUser, user.Id)
+
+       if queryUser.Name == user.Name || queryUser.Age != user.Age {
+               t.Errorf("Should only update users with name column")
+       }
+}
+
+func TestOmitWithUpdateColumn(t *testing.T) {
+       user := getPreparedUser("select_user", "select_with_update_map")
+       DB.Create(user)
+
+       updateValues := map[string]interface{}{"Name": "new_name", "Age": 50}
+
+       var reloadUser User
+       DB.First(&reloadUser, user.Id)
+       DB.Model(&reloadUser).Omit("Name").UpdateColumn(updateValues)
+
+       var queryUser User
+       DB.First(&queryUser, user.Id)
+
+       if queryUser.Name != user.Name || queryUser.Age == user.Age {
+               t.Errorf("Should omit name column when update user")
+       }
+}
+
+func TestUpdateColumnsSkipsAssociations(t *testing.T) {
+       user := getPreparedUser("update_columns_user", "special_role")
+       user.Age = 99
+       address1 := "first street"
+       user.BillingAddress = Address{Address1: address1}
+       DB.Save(user)
+
+       // Update a single field of the user and verify that the changed address is not stored.
+       newAge := int64(100)
+       user.BillingAddress.Address1 = "second street"
+       db := DB.Model(user).UpdateColumns(User{Age: newAge})
+       if db.RowsAffected != 1 {
+               t.Errorf("Expected RowsAffected=1 but instead RowsAffected=%v", DB.RowsAffected)
+       }
+
+       // Verify that Age now=`newAge`.
+       freshUser := &User{Id: user.Id}
+       DB.First(freshUser)
+       if freshUser.Age != newAge {
+               t.Errorf("Expected freshly queried user to have Age=%v but instead found Age=%v", newAge, freshUser.Age)
+       }
+
+       // Verify that user's BillingAddress.Address1 is not changed and is still "first street".
+       DB.First(&freshUser.BillingAddress, freshUser.BillingAddressID)
+       if freshUser.BillingAddress.Address1 != address1 {
+               t.Errorf("Expected user's BillingAddress.Address1=%s to remain unchanged after UpdateColumns invocation, but BillingAddress.Address1=%s", address1, freshUser.BillingAddress.Address1)
+       }
+}
+
+func TestUpdatesWithBlankValues(t *testing.T) {
+       product := Product{Code: "product1", Price: 10}
+       DB.Save(&product)
+
+       DB.Model(&Product{Id: product.Id}).Updates(&Product{Price: 100})
+
+       var product1 Product
+       DB.First(&product1, product.Id)
+
+       if product1.Code != "product1" || product1.Price != 100 {
+               t.Errorf("product's code should not be updated")
+       }
+}
+
// ElementWithIgnoredField is a test fixture whose IgnoredField carries
// the `sql:"-"` tag, marking it as not mapped to a database column.
type ElementWithIgnoredField struct {
	Id           int64
	Value        string
	IgnoredField int64 `sql:"-"`
}

// TableName maps the fixture to a fixed table name.
func (e ElementWithIgnoredField) TableName() string {
	return "element_with_ignored_field"
}
+
// TestUpdatesTableWithIgnoredValues checks that a column tagged
// `sql:"-"` is not written by a Table()-based Updates call: after the
// update, IgnoredField must still read back as its zero value.
func TestUpdatesTableWithIgnoredValues(t *testing.T) {
	elem := ElementWithIgnoredField{Value: "foo", IgnoredField: 10}
	DB.Save(&elem)

	DB.Table(elem.TableName()).
		Where("id = ?", elem.Id).
		// DB.Model(&ElementWithIgnoredField{Id: elem.Id}).
		Updates(&ElementWithIgnoredField{Value: "bar", IgnoredField: 100})

	var elem1 ElementWithIgnoredField
	err := DB.First(&elem1, elem.Id).Error
	if err != nil {
		t.Errorf("error getting an element from database: %s", err.Error())
	}

	if elem1.IgnoredField != 0 {
		t.Errorf("element's ignored field should not be updated")
	}
}
+
// TestUpdateDecodeVirtualAttributes checks that Updates copies values
// for virtual attributes (here: IgnoreMe) back onto the passed-in
// struct, so they remain available to callbacks.
func TestUpdateDecodeVirtualAttributes(t *testing.T) {
	var user = User{
		Name:     "jinzhu",
		IgnoreMe: 88,
	}

	DB.Save(&user)

	DB.Model(&user).Updates(User{Name: "jinzhu2", IgnoreMe: 100})

	if user.IgnoreMe != 100 {
		t.Errorf("should decode virtual attributes to struct, so it could be used in callbacks")
	}
}
diff --git a/vendor/github.com/jinzhu/gorm/utils.go b/vendor/github.com/jinzhu/gorm/utils.go
new file mode 100644 (file)
index 0000000..df8e541
--- /dev/null
@@ -0,0 +1,243 @@
+package gorm
+
+import (
+       "database/sql/driver"
+       "fmt"
+       "reflect"
+       "regexp"
+       "runtime"
+       "strings"
+       "sync"
+       "time"
+)
+
// NowFunc returns current time, this function is exported in order to be able
// to give the flexibility to the developer to customize it according to their
// needs, e.g:
//    gorm.NowFunc = func() time.Time {
//      return time.Now().UTC()
//    }
var NowFunc = func() time.Time {
	return time.Now()
}

// Copied from golint
var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UID", "UI", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"}

// commonInitialismsReplacer rewrites each initialism to its capitalized
// form (e.g. "ID" -> "Id"); it is populated by the init function below.
var commonInitialismsReplacer *strings.Replacer

// goSrcRegexp matches file paths inside the gorm package itself and
// goTestRegexp matches gorm's own test files; fileWithLineNum uses them
// to skip gorm frames (but keep test frames) when reporting a caller.
var goSrcRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*.go`)
var goTestRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*test.go`)
+
+func init() {
+       var commonInitialismsForReplacer []string
+       for _, initialism := range commonInitialisms {
+               commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism)))
+       }
+       commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...)
+}
+
// safeMap is a string-to-string map guarded by an RWMutex so it can be
// read and written from multiple goroutines.
type safeMap struct {
	m map[string]string
	l *sync.RWMutex
}

// Set stores value under key while holding the write lock.
func (s *safeMap) Set(key string, value string) {
	s.l.Lock()
	defer s.l.Unlock()
	s.m[key] = value
}

// Get returns the value stored under key (the empty string when the key
// is absent) while holding the read lock.
func (s *safeMap) Get(key string) string {
	s.l.RLock()
	defer s.l.RUnlock()
	return s.m[key]
}

// newSafeMap returns an empty, ready-to-use safeMap.
func newSafeMap() *safeMap {
	return &safeMap{
		m: map[string]string{},
		l: &sync.RWMutex{},
	}
}
+
// expr represents a raw SQL fragment together with its bind arguments.
type expr struct {
	expr string
	args []interface{}
}

// Expr generate raw SQL expression, for example:
//     DB.Model(&product).Update("price", gorm.Expr("price * ? + ?", 2, 100))
func Expr(expression string, args ...interface{}) *expr {
	e := &expr{}
	e.expr = expression
	e.args = args
	return e
}
+
+func indirect(reflectValue reflect.Value) reflect.Value {
+       for reflectValue.Kind() == reflect.Ptr {
+               reflectValue = reflectValue.Elem()
+       }
+       return reflectValue
+}
+
+func toQueryMarks(primaryValues [][]interface{}) string {
+       var results []string
+
+       primaryValues = distinctValues(primaryValues)
+       for _, primaryValue := range primaryValues {
+               var marks []string
+               for range primaryValue {
+                       marks = append(marks, "?")
+               }
+
+               if len(marks) > 1 {
+                       results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ",")))
+               } else {
+                       results = append(results, strings.Join(marks, ""))
+               }
+       }
+       return strings.Join(results, ",")
+}
+
+func toQueryCondition(scope *Scope, columns []string) string {
+       var newColumns []string
+       for _, column := range columns {
+               newColumns = append(newColumns, scope.Quote(column))
+       }
+
+       if len(columns) > 1 {
+               return fmt.Sprintf("(%v)", strings.Join(newColumns, ","))
+       }
+       return strings.Join(newColumns, ",")
+}
+
+func toQueryValues(values [][]interface{}) (results []interface{}) {
+       values = distinctValues(values)
+       for _, value := range values {
+               for _, v := range value {
+                       results = append(results, v)
+               }
+       }
+       return
+}
+
+func fileWithLineNum() string {
+       for i := 2; i < 15; i++ {
+               _, file, line, ok := runtime.Caller(i)
+               if ok && (!goSrcRegexp.MatchString(file) || goTestRegexp.MatchString(file)) {
+                       return fmt.Sprintf("%v:%v", file, line)
+               }
+       }
+       return ""
+}
+
+func isBlank(value reflect.Value) bool {
+       switch value.Kind() {
+       case reflect.String:
+               return value.Len() == 0
+       case reflect.Bool:
+               return !value.Bool()
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return value.Int() == 0
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+               return value.Uint() == 0
+       case reflect.Float32, reflect.Float64:
+               return value.Float() == 0
+       case reflect.Interface, reflect.Ptr:
+               return value.IsNil()
+       }
+
+       return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface())
+}
+
// toSearchableMap normalizes search attributes: a ("key", value) pair
// becomes a single-entry map, a single attribute is passed through
// unchanged, and no attributes yield nil.
//
// CLEANUP: the original's map[string]interface{} type assertion was dead
// code — the following assertion to interface{} succeeded for every
// non-nil attrs[0] and overwrote the result with attrs[0] itself, and
// for a nil attrs[0] both branches left result nil. The direct
// assignment below is behaviorally identical.
func toSearchableMap(attrs ...interface{}) (result interface{}) {
	if len(attrs) > 1 {
		if str, ok := attrs[0].(string); ok {
			result = map[string]interface{}{str: attrs[1]}
		}
	} else if len(attrs) == 1 {
		result = attrs[0]
	}
	return
}
+
// equalAsString reports whether a and b render to the same string via
// toString.
func equalAsString(a interface{}, b interface{}) bool {
	return toString(a) == toString(b)
}

// toString renders an arbitrary value as a string: []interface{} joins
// each element's rendering with "_", []byte converts directly, pointers
// are dereferenced, and invalid values (e.g. nil) become "".
func toString(str interface{}) string {
	switch v := str.(type) {
	case []interface{}:
		parts := make([]string, 0, len(v))
		for _, value := range v {
			parts = append(parts, toString(value))
		}
		return strings.Join(parts, "_")
	case []byte:
		return string(v)
	}
	if rv := reflect.Indirect(reflect.ValueOf(str)); rv.IsValid() {
		return fmt.Sprintf("%v", rv.Interface())
	}
	return ""
}
+
+func makeSlice(elemType reflect.Type) interface{} {
+       if elemType.Kind() == reflect.Slice {
+               elemType = elemType.Elem()
+       }
+       sliceType := reflect.SliceOf(elemType)
+       slice := reflect.New(sliceType)
+       slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0))
+       return slice.Interface()
+}
+
// strInSlice reports whether a is present in list.
func strInSlice(a string, list []string) bool {
	for _, candidate := range list {
		if candidate == a {
			return true
		}
	}
	return false
}
+
+// getValueFromFields return given fields's value
+func getValueFromFields(value reflect.Value, fieldNames []string) (results []interface{}) {
+       // If value is a nil pointer, Indirect returns a zero Value!
+       // Therefor we need to check for a zero value,
+       // as FieldByName could panic
+       if indirectValue := reflect.Indirect(value); indirectValue.IsValid() {
+               for _, fieldName := range fieldNames {
+                       if fieldValue := reflect.Indirect(indirectValue.FieldByName(fieldName)); fieldValue.IsValid() {
+                               result := fieldValue.Interface()
+                               if r, ok := result.(driver.Valuer); ok {
+                                       result, _ = r.Value()
+                               }
+                               results = append(results, result)
+                       }
+               }
+       }
+       return
+}
+
// addExtraSpaceIfExist prefixes str with a single space when it is
// non-empty, so SQL fragments can be concatenated safely.
func addExtraSpaceIfExist(str string) string {
	if str == "" {
		return ""
	}
	return " " + str
}
+
// distinctValues removes duplicate value tuples, preserving each
// tuple's contents and first-seen order.
//
// BUG FIX: the original flattened every tuple into single-element
// slices (corrupting composite primary keys) and emitted the results
// in random map-iteration order, so the two independent calls made by
// toQueryMarks and toQueryValues could disagree on ordering and produce
// mismatched placeholders/arguments. Tuples are now kept intact and
// deduplicated as whole units, in deterministic input order.
func distinctValues(values [][]interface{}) [][]interface{} {
	var results [][]interface{}
	seen := make(map[string]bool, len(values))
	for _, value := range values {
		key := fmt.Sprintf("%v", value)
		if seen[key] {
			continue
		}
		seen[key] = true
		results = append(results, value)
	}
	return results
}
diff --git a/vendor/github.com/jinzhu/gorm/wercker.yml b/vendor/github.com/jinzhu/gorm/wercker.yml
new file mode 100755 (executable)
index 0000000..0c3e73e
--- /dev/null
@@ -0,0 +1,148 @@
+# use the default golang container from Docker Hub
+box: golang
+
+services:
+    - name: mariadb
+      id: mariadb:latest
+      env:
+          MYSQL_DATABASE: gorm
+          MYSQL_USER: gorm
+          MYSQL_PASSWORD: gorm
+          MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+    - name: mysql57
+      id: mysql:5.7
+      env:
+          MYSQL_DATABASE: gorm
+          MYSQL_USER: gorm
+          MYSQL_PASSWORD: gorm
+          MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+    - name: mysql56
+      id: mysql:5.6
+      env:
+          MYSQL_DATABASE: gorm
+          MYSQL_USER: gorm
+          MYSQL_PASSWORD: gorm
+          MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+    - name: mysql55
+      id: mysql:5.5
+      env:
+          MYSQL_DATABASE: gorm
+          MYSQL_USER: gorm
+          MYSQL_PASSWORD: gorm
+          MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+    - name: postgres
+      id: postgres:latest
+      env:
+          POSTGRES_USER: gorm
+          POSTGRES_PASSWORD: gorm
+          POSTGRES_DB: gorm
+    - name: postgres96
+      id: postgres:9.6
+      env:
+          POSTGRES_USER: gorm
+          POSTGRES_PASSWORD: gorm
+          POSTGRES_DB: gorm
+    - name: postgres95
+      id: postgres:9.5
+      env:
+          POSTGRES_USER: gorm
+          POSTGRES_PASSWORD: gorm
+          POSTGRES_DB: gorm
+    - name: postgres94
+      id: postgres:9.4
+      env:
+          POSTGRES_USER: gorm
+          POSTGRES_PASSWORD: gorm
+          POSTGRES_DB: gorm
+    - name: postgres93
+      id: postgres:9.3
+      env:
+          POSTGRES_USER: gorm
+          POSTGRES_PASSWORD: gorm
+          POSTGRES_DB: gorm
+    - name: mssql
+      id: mcmoe/mssqldocker:latest
+      env:
+        ACCEPT_EULA: Y
+        SA_PASSWORD: LoremIpsum86
+        MSSQL_DB: gorm
+        MSSQL_USER: gorm
+        MSSQL_PASSWORD: LoremIpsum86
+
+# The steps that will be executed in the build pipeline
+build:
+    # The steps that will be executed on build
+    steps:
+        # Sets the go workspace and places you package
+        # at the right place in the workspace tree
+        - setup-go-workspace
+
+        # Gets the dependencies
+        - script:
+                name: go get
+                code: |
+                    cd $WERCKER_SOURCE_DIR
+                    go version
+                    go get -t ./...
+
+        # Build the project
+        - script:
+                name: go build
+                code: |
+                    go build ./...
+
+        # Test the project
+        - script:
+                name: test sqlite
+                code: |
+                    go test ./...
+
+        - script:
+                name: test mariadb
+                code: |
+                    GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mariadb:3306)/gorm?charset=utf8&parseTime=True" go test ./...
+
+        - script:
+                name: test mysql5.7
+                code: |
+                    GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql57:3306)/gorm?charset=utf8&parseTime=True" go test ./...
+
+        - script:
+                name: test mysql5.6
+                code: |
+                    GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql56:3306)/gorm?charset=utf8&parseTime=True" go test ./...
+
+        - script:
+                name: test mysql5.5
+                code: |
+                    GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql55:3306)/gorm?charset=utf8&parseTime=True" go test ./...
+
+        - script:
+                name: test postgres
+                code: |
+                    GORM_DIALECT=postgres GORM_DSN="host=postgres user=gorm password=gorm dbname=gorm port=5432 sslmode=disable" go test ./...
+
+        - script:
+                name: test postgres96
+                code: |
+                    GORM_DIALECT=postgres GORM_DSN="host=postgres96 user=gorm password=gorm dbname=gorm port=5432 sslmode=disable" go test ./...
+
+        - script:
+                name: test postgres95
+                code: |
+                    GORM_DIALECT=postgres GORM_DSN="host=postgres95 user=gorm password=gorm dbname=gorm port=5432 sslmode=disable" go test ./...
+
+        - script:
+                name: test postgres94
+                code: |
+                    GORM_DIALECT=postgres GORM_DSN="host=postgres94 user=gorm password=gorm dbname=gorm port=5432 sslmode=disable" go test ./...
+
+        - script:
+                name: test postgres93
+                code: |
+                    GORM_DIALECT=postgres GORM_DSN="host=postgres93 user=gorm password=gorm dbname=gorm port=5432 sslmode=disable" go test ./...
+
+        - script:
+                name: test mssql
+                code: |
+                    GORM_DIALECT=mssql GORM_DSN="sqlserver://gorm:LoremIpsum86@mssql:1433?database=gorm" go test ./...
diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE
new file mode 100755 (executable)
index 0000000..a1ca9a0
--- /dev/null
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 - Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md
new file mode 100755 (executable)
index 0000000..a3de336
--- /dev/null
@@ -0,0 +1,55 @@
+# Inflection
+
+Inflection pluralizes and singularizes English nouns
+
+[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)
+
+## Basic Usage
+
+```go
+inflection.Plural("person") => "people"
+inflection.Plural("Person") => "People"
+inflection.Plural("PERSON") => "PEOPLE"
+inflection.Plural("bus")    => "buses"
+inflection.Plural("BUS")    => "BUSES"
+inflection.Plural("Bus")    => "Buses"
+
+inflection.Singular("people") => "person"
+inflection.Singular("People") => "Person"
+inflection.Singular("PEOPLE") => "PERSON"
+inflection.Singular("buses")  => "bus"
+inflection.Singular("BUSES")  => "BUS"
+inflection.Singular("Buses")  => "Bus"
+
+inflection.Plural("FancyPerson") => "FancyPeople"
+inflection.Singular("FancyPeople") => "FancyPerson"
+```
+
+## Register Rules
+
+Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
+
+If you want to register more rules, follow:
+
+```
+inflection.AddUncountable("fish")
+inflection.AddIrregular("person", "people")
+inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
+inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
+```
+
+## Contributing
+
+You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
+
+## Author
+
+**jinzhu**
+
+* <http://github.com/jinzhu>
+* <wosmvp@gmail.com>
+* <http://twitter.com/zhangjinzhu>
+
+## License
+
+Released under the [MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go
new file mode 100755 (executable)
index 0000000..606263b
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+Package inflection pluralizes and singularizes English nouns.
+
+               inflection.Plural("person") => "people"
+               inflection.Plural("Person") => "People"
+               inflection.Plural("PERSON") => "PEOPLE"
+
+               inflection.Singular("people") => "person"
+               inflection.Singular("People") => "Person"
+               inflection.Singular("PEOPLE") => "PERSON"
+
+               inflection.Plural("FancyPerson") => "FancyPeople"
+               inflection.Singular("FancyPeople") => "FancyPerson"
+
+Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
+
+If you want to register more rules, follow:
+
+               inflection.AddUncountable("fish")
+               inflection.AddIrregular("person", "people")
+               inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
+               inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
+*/
+package inflection
+
+import (
+       "regexp"
+       "strings"
+)
+
+type inflection struct {
+       regexp  *regexp.Regexp
+       replace string
+}
+
+// Regular is a regexp find replace inflection
+type Regular struct {
+       find    string
+       replace string
+}
+
+// Irregular is a hard replace inflection,
+// containing both singular and plural forms
+type Irregular struct {
+       singular string
+       plural   string
+}
+
+// RegularSlice is a slice of Regular inflections
+type RegularSlice []Regular
+
+// IrregularSlice is a slice of Irregular inflections
+type IrregularSlice []Irregular
+
+var pluralInflections = RegularSlice{
+       {"([a-z])$", "${1}s"},
+       {"s$", "s"},
+       {"^(ax|test)is$", "${1}es"},
+       {"(octop|vir)us$", "${1}i"},
+       {"(octop|vir)i$", "${1}i"},
+       {"(alias|status)$", "${1}es"},
+       {"(bu)s$", "${1}ses"},
+       {"(buffal|tomat)o$", "${1}oes"},
+       {"([ti])um$", "${1}a"},
+       {"([ti])a$", "${1}a"},
+       {"sis$", "ses"},
+       {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
+       {"(hive)$", "${1}s"},
+       {"([^aeiouy]|qu)y$", "${1}ies"},
+       {"(x|ch|ss|sh)$", "${1}es"},
+       {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
+       {"^(m|l)ouse$", "${1}ice"},
+       {"^(m|l)ice$", "${1}ice"},
+       {"^(ox)$", "${1}en"},
+       {"^(oxen)$", "${1}"},
+       {"(quiz)$", "${1}zes"},
+}
+
+var singularInflections = RegularSlice{
+       {"s$", ""},
+       {"(ss)$", "${1}"},
+       {"(n)ews$", "${1}ews"},
+       {"([ti])a$", "${1}um"},
+       {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
+       {"(^analy)(sis|ses)$", "${1}sis"},
+       {"([^f])ves$", "${1}fe"},
+       {"(hive)s$", "${1}"},
+       {"(tive)s$", "${1}"},
+       {"([lr])ves$", "${1}f"},
+       {"([^aeiouy]|qu)ies$", "${1}y"},
+       {"(s)eries$", "${1}eries"},
+       {"(m)ovies$", "${1}ovie"},
+       {"(c)ookies$", "${1}ookie"},
+       {"(x|ch|ss|sh)es$", "${1}"},
+       {"^(m|l)ice$", "${1}ouse"},
+       {"(bus)(es)?$", "${1}"},
+       {"(o)es$", "${1}"},
+       {"(shoe)s$", "${1}"},
+       {"(cris|test)(is|es)$", "${1}is"},
+       {"^(a)x[ie]s$", "${1}xis"},
+       {"(octop|vir)(us|i)$", "${1}us"},
+       {"(alias|status)(es)?$", "${1}"},
+       {"^(ox)en", "${1}"},
+       {"(vert|ind)ices$", "${1}ex"},
+       {"(matr)ices$", "${1}ix"},
+       {"(quiz)zes$", "${1}"},
+       {"(database)s$", "${1}"},
+}
+
+var irregularInflections = IrregularSlice{
+       {"person", "people"},
+       {"man", "men"},
+       {"child", "children"},
+       {"sex", "sexes"},
+       {"move", "moves"},
+       {"zombie", "zombies"},
+}
+
+var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}
+
+var compiledPluralMaps []inflection
+var compiledSingularMaps []inflection
+
+func compile() {
+       compiledPluralMaps = []inflection{}
+       compiledSingularMaps = []inflection{}
+       for _, uncountable := range uncountableInflections {
+               inf := inflection{
+                       regexp:  regexp.MustCompile("^(?i)(" + uncountable + ")$"),
+                       replace: "${1}",
+               }
+               compiledPluralMaps = append(compiledPluralMaps, inf)
+               compiledSingularMaps = append(compiledSingularMaps, inf)
+       }
+
+       for _, value := range irregularInflections {
+               infs := []inflection{
+                       inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)},
+                       inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)},
+                       inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural},
+               }
+               compiledPluralMaps = append(compiledPluralMaps, infs...)
+       }
+
+       for _, value := range irregularInflections {
+               infs := []inflection{
+                       inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)},
+                       inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)},
+                       inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular},
+               }
+               compiledSingularMaps = append(compiledSingularMaps, infs...)
+       }
+
+       for i := len(pluralInflections) - 1; i >= 0; i-- {
+               value := pluralInflections[i]
+               infs := []inflection{
+                       inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
+                       inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
+                       inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
+               }
+               compiledPluralMaps = append(compiledPluralMaps, infs...)
+       }
+
+       for i := len(singularInflections) - 1; i >= 0; i-- {
+               value := singularInflections[i]
+               infs := []inflection{
+                       inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
+                       inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
+                       inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
+               }
+               compiledSingularMaps = append(compiledSingularMaps, infs...)
+       }
+}
+
+func init() {
+       compile()
+}
+
+// AddPlural adds a plural inflection
+func AddPlural(find, replace string) {
+       pluralInflections = append(pluralInflections, Regular{find, replace})
+       compile()
+}
+
+// AddSingular adds a singular inflection
+func AddSingular(find, replace string) {
+       singularInflections = append(singularInflections, Regular{find, replace})
+       compile()
+}
+
+// AddIrregular adds an irregular inflection
+func AddIrregular(singular, plural string) {
+       irregularInflections = append(irregularInflections, Irregular{singular, plural})
+       compile()
+}
+
+// AddUncountable adds an uncountable inflection
+func AddUncountable(values ...string) {
+       uncountableInflections = append(uncountableInflections, values...)
+       compile()
+}
+
+// GetPlural retrieves the plural inflection values
+func GetPlural() RegularSlice {
+       plurals := make(RegularSlice, len(pluralInflections))
+       copy(plurals, pluralInflections)
+       return plurals
+}
+
+// GetSingular retrieves the singular inflection values
+func GetSingular() RegularSlice {
+       singulars := make(RegularSlice, len(singularInflections))
+       copy(singulars, singularInflections)
+       return singulars
+}
+
+// GetIrregular retrieves the irregular inflection values
+func GetIrregular() IrregularSlice {
+       irregular := make(IrregularSlice, len(irregularInflections))
+       copy(irregular, irregularInflections)
+       return irregular
+}
+
+// GetUncountable retrieves the uncountable inflection values
+func GetUncountable() []string {
+       uncountables := make([]string, len(uncountableInflections))
+       copy(uncountables, uncountableInflections)
+       return uncountables
+}
+
+// SetPlural sets the plural inflections slice
+func SetPlural(inflections RegularSlice) {
+       pluralInflections = inflections
+       compile()
+}
+
+// SetSingular sets the singular inflections slice
+func SetSingular(inflections RegularSlice) {
+       singularInflections = inflections
+       compile()
+}
+
+// SetIrregular sets the irregular inflections slice
+func SetIrregular(inflections IrregularSlice) {
+       irregularInflections = inflections
+       compile()
+}
+
+// SetUncountable sets the uncountable inflections slice
+func SetUncountable(inflections []string) {
+       uncountableInflections = inflections
+       compile()
+}
+
+// Plural converts a word to its plural form
+func Plural(str string) string {
+       for _, inflection := range compiledPluralMaps {
+               if inflection.regexp.MatchString(str) {
+                       return inflection.regexp.ReplaceAllString(str, inflection.replace)
+               }
+       }
+       return str
+}
+
+// Singular converts a word to its singular form
+func Singular(str string) string {
+       for _, inflection := range compiledSingularMaps {
+               if inflection.regexp.MatchString(str) {
+                       return inflection.regexp.ReplaceAllString(str, inflection.replace)
+               }
+       }
+       return str
+}
diff --git a/vendor/github.com/jinzhu/inflection/inflections_test.go b/vendor/github.com/jinzhu/inflection/inflections_test.go
new file mode 100755 (executable)
index 0000000..689e1df
--- /dev/null
@@ -0,0 +1,213 @@
+package inflection
+
+import (
+       "strings"
+       "testing"
+)
+
+var inflections = map[string]string{
+       "star":        "stars",
+       "STAR":        "STARS",
+       "Star":        "Stars",
+       "bus":         "buses",
+       "fish":        "fish",
+       "mouse":       "mice",
+       "query":       "queries",
+       "ability":     "abilities",
+       "agency":      "agencies",
+       "movie":       "movies",
+       "archive":     "archives",
+       "index":       "indices",
+       "wife":        "wives",
+       "safe":        "saves",
+       "half":        "halves",
+       "move":        "moves",
+       "salesperson": "salespeople",
+       "person":      "people",
+       "spokesman":   "spokesmen",
+       "man":         "men",
+       "woman":       "women",
+       "basis":       "bases",
+       "diagnosis":   "diagnoses",
+       "diagnosis_a": "diagnosis_as",
+       "datum":       "data",
+       "medium":      "media",
+       "stadium":     "stadia",
+       "analysis":    "analyses",
+       "node_child":  "node_children",
+       "child":       "children",
+       "experience":  "experiences",
+       "day":         "days",
+       "comment":     "comments",
+       "foobar":      "foobars",
+       "newsletter":  "newsletters",
+       "old_news":    "old_news",
+       "news":        "news",
+       "series":      "series",
+       "species":     "species",
+       "quiz":        "quizzes",
+       "perspective": "perspectives",
+       "ox":          "oxen",
+       "photo":       "photos",
+       "buffalo":     "buffaloes",
+       "tomato":      "tomatoes",
+       "dwarf":       "dwarves",
+       "elf":         "elves",
+       "information": "information",
+       "equipment":   "equipment",
+       "criterion":   "criteria",
+}
+
+// storage is used to restore the state of the global variables
+// on each test execution, to ensure no global state pollution
+type storage struct {
+       singulars    RegularSlice
+       plurals      RegularSlice
+       irregulars   IrregularSlice
+       uncountables []string
+}
+
+var backup = storage{}
+
+func init() {
+       AddIrregular("criterion", "criteria")
+       copy(backup.singulars, singularInflections)
+       copy(backup.plurals, pluralInflections)
+       copy(backup.irregulars, irregularInflections)
+       copy(backup.uncountables, uncountableInflections)
+}
+
+func restore() {
+       copy(singularInflections, backup.singulars)
+       copy(pluralInflections, backup.plurals)
+       copy(irregularInflections, backup.irregulars)
+       copy(uncountableInflections, backup.uncountables)
+}
+
+func TestPlural(t *testing.T) {
+       for key, value := range inflections {
+               if v := Plural(strings.ToUpper(key)); v != strings.ToUpper(value) {
+                       t.Errorf("%v's plural should be %v, but got %v", strings.ToUpper(key), strings.ToUpper(value), v)
+               }
+
+               if v := Plural(strings.Title(key)); v != strings.Title(value) {
+                       t.Errorf("%v's plural should be %v, but got %v", strings.Title(key), strings.Title(value), v)
+               }
+
+               if v := Plural(key); v != value {
+                       t.Errorf("%v's plural should be %v, but got %v", key, value, v)
+               }
+       }
+}
+
+func TestSingular(t *testing.T) {
+       for key, value := range inflections {
+               if v := Singular(strings.ToUpper(value)); v != strings.ToUpper(key) {
+                       t.Errorf("%v's singular should be %v, but got %v", strings.ToUpper(value), strings.ToUpper(key), v)
+               }
+
+               if v := Singular(strings.Title(value)); v != strings.Title(key) {
+                       t.Errorf("%v's singular should be %v, but got %v", strings.Title(value), strings.Title(key), v)
+               }
+
+               if v := Singular(value); v != key {
+                       t.Errorf("%v's singular should be %v, but got %v", value, key, v)
+               }
+       }
+}
+
+func TestAddPlural(t *testing.T) {
+       defer restore()
+       ln := len(pluralInflections)
+       AddPlural("", "")
+       if ln+1 != len(pluralInflections) {
+               t.Errorf("Expected len %d, got %d", ln+1, len(pluralInflections))
+       }
+}
+
+func TestAddSingular(t *testing.T) {
+       defer restore()
+       ln := len(singularInflections)
+       AddSingular("", "")
+       if ln+1 != len(singularInflections) {
+               t.Errorf("Expected len %d, got %d", ln+1, len(singularInflections))
+       }
+}
+
+func TestAddIrregular(t *testing.T) {
+       defer restore()
+       ln := len(irregularInflections)
+       AddIrregular("", "")
+       if ln+1 != len(irregularInflections) {
+               t.Errorf("Expected len %d, got %d", ln+1, len(irregularInflections))
+       }
+}
+
+func TestAddUncountable(t *testing.T) {
+       defer restore()
+       ln := len(uncountableInflections)
+       AddUncountable("", "")
+       if ln+2 != len(uncountableInflections) {
+               t.Errorf("Expected len %d, got %d", ln+2, len(uncountableInflections))
+       }
+}
+
+func TestGetPlural(t *testing.T) {
+       plurals := GetPlural()
+       if len(plurals) != len(pluralInflections) {
+               t.Errorf("Expected len %d, got %d", len(plurals), len(pluralInflections))
+       }
+}
+
+func TestGetSingular(t *testing.T) {
+       singular := GetSingular()
+       if len(singular) != len(singularInflections) {
+               t.Errorf("Expected len %d, got %d", len(singular), len(singularInflections))
+       }
+}
+
+func TestGetIrregular(t *testing.T) {
+       irregular := GetIrregular()
+       if len(irregular) != len(irregularInflections) {
+               t.Errorf("Expected len %d, got %d", len(irregular), len(irregularInflections))
+       }
+}
+
+func TestGetUncountable(t *testing.T) {
+       uncountables := GetUncountable()
+       if len(uncountables) != len(uncountableInflections) {
+               t.Errorf("Expected len %d, got %d", len(uncountables), len(uncountableInflections))
+       }
+}
+
+func TestSetPlural(t *testing.T) {
+       defer restore()
+       SetPlural(RegularSlice{{}, {}})
+       if len(pluralInflections) != 2 {
+               t.Errorf("Expected len 2, got %d", len(pluralInflections))
+       }
+}
+
+func TestSetSingular(t *testing.T) {
+       defer restore()
+       SetSingular(RegularSlice{{}, {}})
+       if len(singularInflections) != 2 {
+               t.Errorf("Expected len 2, got %d", len(singularInflections))
+       }
+}
+
+func TestSetIrregular(t *testing.T) {
+       defer restore()
+       SetIrregular(IrregularSlice{{}, {}})
+       if len(irregularInflections) != 2 {
+               t.Errorf("Expected len 2, got %d", len(irregularInflections))
+       }
+}
+
+func TestSetUncountable(t *testing.T) {
+       defer restore()
+       SetUncountable([]string{"", ""})
+       if len(uncountableInflections) != 2 {
+               t.Errorf("Expected len 2, got %d", len(uncountableInflections))
+       }
+}
diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml
new file mode 100755 (executable)
index 0000000..5e6ce98
--- /dev/null
@@ -0,0 +1,23 @@
+box: golang
+
+build:
+  steps:
+    - setup-go-workspace
+
+    # Gets the dependencies
+    - script:
+        name: go get
+        code: |
+          go get
+
+    # Build the project
+    - script:
+        name: go build
+        code: |
+          go build ./...
+
+    # Test the project
+    - script:
+        name: go test
+        code: |
+          go test ./...