From abaf76a04766cf5a084052e53db23e50b5d1564c Mon Sep 17 00:00:00 2001
From: Bobby Rullo
Date: Wed, 19 Aug 2015 15:50:44 -0700
Subject: [PATCH 1/4] godep: add github.com/rubenv/sql-migrate

---
 Godeps/Godeps.json | 11 +-
 .../github.com/rubenv/sql-migrate/.gitignore | 5 +
 .../github.com/rubenv/sql-migrate/.travis.yml | 25 +
 .../github.com/rubenv/sql-migrate/README.md | 245 ++
 .../rubenv/sql-migrate/bindata_test.go | 136 ++
 .../src/github.com/rubenv/sql-migrate/doc.go | 199 ++
 .../rubenv/sql-migrate/init_test.go | 9 +
 .../github.com/rubenv/sql-migrate/migrate.go | 475 ++++
 .../rubenv/sql-migrate/migrate_test.go | 357 +++
 .../rubenv/sql-migrate/sort_test.go | 34 +
 .../sql-migrate/sql-migrate/command_common.go | 63 +
 .../sql-migrate/sql-migrate/command_down.go | 55 +
 .../sql-migrate/sql-migrate/command_redo.go | 88 +
 .../sql-migrate/sql-migrate/command_status.go | 113 +
 .../sql-migrate/sql-migrate/command_up.go | 55 +
 .../rubenv/sql-migrate/sql-migrate/config.go | 103 +
 .../rubenv/sql-migrate/sql-migrate/main.go | 46 +
 .../sql-migrate/sql-migrate/main_test.go | 1 +
 .../rubenv/sql-migrate/sql-migrate/mssql.go | 12 +
 .../rubenv/sql-migrate/sqlparse/README.md | 28 +
 .../rubenv/sql-migrate/sqlparse/sqlparse.go | 128 +
 .../sql-migrate/sqlparse/sqlparse_test.go | 151 ++
 .../sql-migrate/test-integration/dbconfig.yml | 20 +
 .../test-integration/mysql-flag.sh | 10 +
 .../sql-migrate/test-integration/mysql.sh | 14 +
 .../sql-migrate/test-integration/postgres.sh | 14 +
 .../sql-migrate/test-integration/sqlite.sh | 17 +
 .../sql-migrate/test-migrations/1_initial.sql | 8 +
 .../sql-migrate/test-migrations/2_record.sql | 5 +
 .../rubenv/sql-migrate/toapply_test.go | 101 +
 .../src/gopkg.in/gorp.v1/.gitignore | 8 +
 .../src/gopkg.in/gorp.v1/.travis.yml | 21 +
 .../_workspace/src/gopkg.in/gorp.v1/LICENSE | 22 +
 .../_workspace/src/gopkg.in/gorp.v1/Makefile | 6 +
 .../_workspace/src/gopkg.in/gorp.v1/README.md | 672 ++++++
 .../src/gopkg.in/gorp.v1/dialect.go | 692 ++++++
 .../_workspace/src/gopkg.in/gorp.v1/errors.go | 26 +
 .../_workspace/src/gopkg.in/gorp.v1/gorp.go | 2085 +++++++++++++++++
 .../src/gopkg.in/gorp.v1/gorp_test.go | 2083 ++++++++++++++++
 .../src/gopkg.in/gorp.v1/test_all.sh | 22 +
 40 files changed, 8164 insertions(+), 1 deletion(-)
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/.gitignore
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/.travis.yml
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/README.md
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/bindata_test.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/doc.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/init_test.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate_test.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sort_test.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_common.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_down.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_redo.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_status.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_up.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/config.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main_test.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/mssql.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/README.md
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse_test.go
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/dbconfig.yml
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql-flag.sh
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql.sh
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/postgres.sh
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/sqlite.sh
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/1_initial.sql
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/2_record.sql
 create mode 100644 Godeps/_workspace/src/github.com/rubenv/sql-migrate/toapply_test.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/.gitignore
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/.travis.yml
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/LICENSE
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/Makefile
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/README.md
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/dialect.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/errors.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/gorp.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/gorp_test.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/gorp.v1/test_all.sh

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 291d4927..29ca517c 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,5 +1,5 @@
 {
-	"ImportPath": "github.com/coreos-inc/auth",
+	"ImportPath": "github.com/coreos/dex",
 	"GoVersion": "go1.4.2",
 	"Packages": [
 		"./..."
@@ -96,6 +96,10 @@
 			"ImportPath": "github.com/mbanzon/simplehttp",
 			"Rev": "04c542e7ac706a25820090f274ea6a4f39a63326"
 		},
+		{
+			"ImportPath": "github.com/rubenv/sql-migrate",
+			"Rev": "53184e1edfb4f9655b0fa8dd2c23e7763f452bda"
+		},
 		{
 			"ImportPath": "golang.org/x/crypto/bcrypt",
 			"Rev": "1fbbd62cfec66bd39d91e97749579579d4d3037e"
@@ -115,6 +119,11 @@
 		{
 			"ImportPath": "google.golang.org/api/googleapi",
 			"Rev": "d3edb0282bde692467788c50070a9211afe75cf3"
+		},
+		{
+			"ImportPath": "gopkg.in/gorp.v1",
+			"Comment": "v1.7.1",
+			"Rev": "c87af80f3cc5036b55b83d77171e156791085e2e"
 		}
 	]
 }
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/.gitignore b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/.gitignore
new file mode 100644
index 00000000..a2cac92c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/.gitignore
@@ -0,0 +1,5 @@
+.*.swp
+*.test
+
+/sql-migrate/test.db
+/test.db
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/.travis.yml b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/.travis.yml
new file mode 100644
index 00000000..9f30ce00
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/.travis.yml
@@ -0,0 +1,25 @@
+language: go
+
+go:
+  - 1.2
+  - 1.3
+  - 1.4
+  - tip
+
+services:
+  - mysql
+
+before_install:
+  - mysql -e "CREATE DATABASE IF NOT EXISTS test;" -uroot
+  - psql -c "CREATE DATABASE test;" -U postgres
+
+install:
+  - go get -t ./...
+  - go install ./...
+
+script:
+  - go test -v ./...
+  - bash test-integration/postgres.sh
+  - bash test-integration/mysql.sh
+  - bash test-integration/mysql-flag.sh
+  - bash test-integration/sqlite.sh
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/README.md b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/README.md
new file mode 100644
index 00000000..897a5f69
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/README.md
@@ -0,0 +1,245 @@
+# sql-migrate
+
+> SQL Schema migration tool for [Go](http://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose).
+
+[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.png)](https://godoc.org/github.com/rubenv/sql-migrate)
+
+Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate).
+
+## Features
+
+* Usable as a CLI tool or as a library
+* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
+* Can embed migrations into your application
+* Migrations are defined with SQL for full flexibility
+* Atomic migrations
+* Up/down migrations to allow rollback
+* Supports multiple database types in one project
+
+## Installation
+
+To install the library and command line program, use the following:
+
+```bash
+go get github.com/rubenv/sql-migrate/...
+```
+
+## Usage
+### As a standalone tool
+```
+$ sql-migrate --help
+usage: sql-migrate [--version] [--help] <command> [<args>]
+
+Available commands are:
+    down      Undo a database migration
+    redo      Reapply the last migration
+    status    Show migration status
+    up        Migrates the database to the most recent version available
+```
+
+Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
+
+```yml
+development:
+    dialect: sqlite3
+    datasource: test.db
+    dir: migrations/sqlite3
+
+production:
+    dialect: postgres
+    datasource: dbname=myapp sslmode=disable
+    dir: migrations/postgres
+    table: migrations
+```
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the `-env` flag (defaults to `development`).
+
+Use the `--help` flag in combination with any of the commands to get an overview of its usage:
+
+```
+$ sql-migrate up --help
+Usage: sql-migrate up [options] ...
+
+  Migrates the database to the most recent version available.
+
+Options:
+
+  -config=config.yml   Configuration file to use.
+  -env="development"   Environment.
+  -limit=0             Limit the number of migrations (0 = unlimited).
+  -dryrun              Don't apply migrations, just print them.
+```
+
+The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
+
+The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+
+Use the `status` command to see the state of the applied migrations:
+
+```bash
+$ sql-migrate status
++---------------+-----------------------------------------+
+|   MIGRATION   |                 APPLIED                 |
++---------------+-----------------------------------------+
+| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+| 2_record.sql  | no                                      |
++---------------+-----------------------------------------+
+```
+
+### As a library
+Import sql-migrate into your application:
+
+```go
+import "github.com/rubenv/sql-migrate"
+```
+
+Set up a source of migrations; this can be from memory, from a set of files, or from bindata (more on that later):
+
+```go
+// Hardcoded strings in memory:
+migrations := &migrate.MemoryMigrationSource{
+    Migrations: []*migrate.Migration{
+        &migrate.Migration{
+            Id:   "123",
+            Up:   []string{"CREATE TABLE people (id int)"},
+            Down: []string{"DROP TABLE people"},
+        },
+    },
+}
+
+// OR: Read migrations from a folder:
+migrations := &migrate.FileMigrationSource{
+    Dir: "db/migrations",
+}
+
+// OR: Use migrations from bindata:
+migrations := &migrate.AssetMigrationSource{
+    Asset:    Asset,
+    AssetDir: AssetDir,
+    Dir:      "migrations",
+}
+```
+
+Then use the `Exec` function to upgrade your database:
+
+```go
+db, err := sql.Open("sqlite3", filename)
+if err != nil {
+    // Handle errors!
+}
+
+n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+if err != nil {
+    // Handle errors!
+}
+fmt.Printf("Applied %d migrations!\n", n)
+```
+
+Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
+
+Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation.
+
+## Writing migrations
+Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
+
+```sql
+-- +migrate Up
+-- SQL in section 'Up' is executed when this migration is applied
+CREATE TABLE people (id int);
+
+
+-- +migrate Down
+-- SQL section 'Down' is executed when this migration is rolled back
+DROP TABLE people;
+```
+
+You can put multiple statements in each block, as long as you end them with a semicolon (`;`).
+
+If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries:
+
+```sql
+-- +migrate Up
+CREATE TABLE people (id int);
+
+-- +migrate StatementBegin
+CREATE OR REPLACE FUNCTION do_something()
+returns void AS $$
+DECLARE
+  create_query text;
+BEGIN
+  -- Do something here
+END;
+$$
+language plpgsql;
+-- +migrate StatementEnd
+
+-- +migrate Down
+DROP FUNCTION do_something();
+DROP TABLE people;
+```
+
+The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
+
+## Embedding migrations with [bindata](https://github.com/jteeuwen/go-bindata)
+If you like your Go applications self-contained (that is: a single binary), use [bindata](https://github.com/jteeuwen/go-bindata) to embed the migration files.
+
+Just write your migration files as usual, as a set of SQL files in a folder.
+
+Then use bindata to generate a `.go` file with the migrations embedded:
+
+```bash
+go-bindata -pkg myapp -o bindata.go db/migrations/
+```
+
+The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives).
+
+Use the `AssetMigrationSource` in your application to find the migrations:
+
+```go
+migrations := &migrate.AssetMigrationSource{
+    Asset:    Asset,
+    AssetDir: AssetDir,
+    Dir:      "db/migrations",
+}
+```
+
+Both `Asset` and `AssetDir` are functions provided by bindata.
+
+Then proceed as usual.
+
+## Extending
+Adding a new migration source means implementing `MigrationSource`.
+
+```go
+type MigrationSource interface {
+    FindMigrations() ([]*Migration, error)
+}
+```
+
+The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
+
+## License
+
+    (The MIT License)
+
+    Copyright (C) 2014-2015 by Ruben Vermeersch
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+    THE SOFTWARE.
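To make the `MigrationSource` contract above concrete, here is a minimal custom source sketch. The `staticMigrationSource` and `byIdAsc` names are illustrative, not part of the vendored package, and the plain lexicographic sort is a simplification: the package itself orders numeric `Id` prefixes numerically.

```go
package myapp

import (
	"sort"

	migrate "github.com/rubenv/sql-migrate"
)

// byIdAsc sorts migrations lexicographically by Id, satisfying the
// contract that FindMigrations returns a sorted slice.
type byIdAsc []*migrate.Migration

func (b byIdAsc) Len() int           { return len(b) }
func (b byIdAsc) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byIdAsc) Less(i, j int) bool { return b[i].Id < b[j].Id }

// staticMigrationSource serves a fixed, pre-built set of migrations.
type staticMigrationSource struct {
	migrations []*migrate.Migration
}

func (s staticMigrationSource) FindMigrations() ([]*migrate.Migration, error) {
	// Copy before sorting so the caller's slice is left untouched.
	out := make([]*migrate.Migration, len(s.migrations))
	copy(out, s.migrations)
	sort.Sort(byIdAsc(out))
	return out, nil
}
```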
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/bindata_test.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/bindata_test.go
new file mode 100644
index 00000000..21cd4220
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/bindata_test.go
@@ -0,0 +1,136 @@
+package migrate
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func bindata_read(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+func test_migrations_1_initial_sql() ([]byte, error) {
+	return bindata_read([]byte{
+		0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x00, 0xff, 0x8c, 0xcd,
+		0x3d, 0x0e, 0x82, 0x40, 0x10, 0x05, 0xe0, 0x7e, 0x4e, 0xf1, 0x3a, 0x34,
+		0x86, 0x13, 0x50, 0xa1, 0xd0, 0x91, 0xa8, 0x08, 0x07, 0x40, 0x76, 0x22,
+		0x13, 0xd7, 0xdd, 0x09, 0xac, 0xc1, 0xe3, 0xbb, 0xc4, 0x68, 0xb4, 0xb3,
+		0x7c, 0x6f, 0x7e, 0xbe, 0x34, 0xc5, 0xe6, 0x26, 0x97, 0xb1, 0x0b, 0x8c,
+		0x56, 0x29, 0xc6, 0xd3, 0xb1, 0x82, 0x38, 0x4c, 0xdc, 0x07, 0xf1, 0x0e,
+		0x49, 0xab, 0x09, 0x64, 0x02, 0x3f, 0xb8, 0xbf, 0x07, 0x36, 0x98, 0x07,
+		0x76, 0x08, 0x43, 0xac, 0x5e, 0x77, 0xcb, 0x52, 0x0c, 0x9d, 0xaa, 0x15,
+		0x36, 0xb4, 0xab, 0xcb, 0xbc, 0x29, 0xd1, 0xe4, 0xdb, 0xaa, 0x84, 0xb2,
+		0x57, 0xcb, 0x58, 0x89, 0x89, 0x2f, 0xc3, 0x3a, 0x23, 0xa2, 0x6f, 0xb0,
+		0xf0, 0xb3, 0x7b, 0x93, 0x1f, 0x6f, 0x29, 0xff, 0x12, 0x47, 0x6f, 0x6d,
+		0x9c, 0x9e, 0xbb, 0xfe, 0x4a, 0x45, 0xbd, 0x3f, 0xfc, 0x98, 0x19, 0x3d,
+		0x03, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x70, 0x5e, 0xf9, 0xda, 0x00, 0x00,
+		0x00,
+	},
+		"test-migrations/1_initial.sql",
+	)
+}
+
+func test_migrations_2_record_sql() ([]byte, error) {
+	return bindata_read([]byte{
+		0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x00, 0xff, 0xd2, 0xd5,
+		0x55, 0xd0, 0xce, 0xcd, 0x4c, 0x2f, 0x4a, 0x2c, 0x49, 0x55, 0x08, 0x2d,
+		0xe0, 0xf2, 0xf4, 0x0b, 0x76, 0x0d, 0x0a, 0x51, 0xf0, 0xf4, 0x0b, 0xf1,
+		0x57, 0x28, 0x48, 0xcd, 0x2f, 0xc8, 0x49, 0x55, 0xd0, 0xc8, 0x4c, 0xd1,
+		0x54, 0x08, 0x73, 0xf4, 0x09, 0x75, 0x0d, 0x56, 0xd0, 0x30, 0xd4, 0xb4,
+		0xe6, 0xe2, 0x42, 0xd6, 0xe3, 0x92, 0x5f, 0x9e, 0xc7, 0xe5, 0xe2, 0xea,
+		0xe3, 0x1a, 0xe2, 0xaa, 0xe0, 0x16, 0xe4, 0xef, 0x0b, 0xd3, 0x15, 0xee,
+		0xe1, 0x1a, 0xe4, 0xaa, 0x90, 0x99, 0x62, 0x6b, 0x68, 0xcd, 0x05, 0x08,
+		0x00, 0x00, 0xff, 0xff, 0xf4, 0x3a, 0x7b, 0xae, 0x64, 0x00, 0x00, 0x00,
+	},
+		"test-migrations/2_record.sql",
+	)
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		return f()
+	}
+	return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+	names := make([]string, 0, len(_bindata))
+	for name := range _bindata {
+		names = append(names, name)
+	}
+	return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() ([]byte, error){
+	"test-migrations/1_initial.sql": test_migrations_1_initial_sql,
+	"test-migrations/2_record.sql":  test_migrations_2_record_sql,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+//     data/
+//       foo.txt
+//       img/
+//         a.png
+//         b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+	node := _bintree
+	if len(name) != 0 {
+		cannonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(cannonicalName, "/")
+		for _, p := range pathList {
+			node = node.Children[p]
+			if node == nil {
+				return nil, fmt.Errorf("Asset %s not found", name)
+			}
+		}
+	}
+	if node.Func != nil {
+		return nil, fmt.Errorf("Asset %s not found", name)
+	}
+	rv := make([]string, 0, len(node.Children))
+	for name := range node.Children {
+		rv = append(rv, name)
+	}
+	return rv, nil
+}
+
+type _bintree_t struct {
+	Func     func() ([]byte, error)
+	Children map[string]*_bintree_t
+}
+
+var _bintree = &_bintree_t{nil, map[string]*_bintree_t{
+	"test-migrations": &_bintree_t{nil, map[string]*_bintree_t{
+		"1_initial.sql": &_bintree_t{test_migrations_1_initial_sql, map[string]*_bintree_t{}},
+		"2_record.sql":  &_bintree_t{test_migrations_2_record_sql, map[string]*_bintree_t{}},
+	}},
+}}
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/doc.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/doc.go
new file mode 100644
index 00000000..63db39f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/doc.go
@@ -0,0 +1,199 @@
+/*
+
+SQL Schema migration tool for Go.
+
+Key features:
+
+ * Usable as a CLI tool or as a library
+ * Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
+ * Can embed migrations into your application
+ * Migrations are defined with SQL for full flexibility
+ * Atomic migrations
+ * Up/down migrations to allow rollback
+ * Supports multiple database types in one project
+
+Installation
+
+To install the library and command line program, use the following:
+
+	go get github.com/rubenv/sql-migrate/...
+
+Command-line tool
+
+The main command is called sql-migrate.
+
+	$ sql-migrate --help
+	usage: sql-migrate [--version] [--help] <command> [<args>]
+
+	Available commands are:
+	    down      Undo a database migration
+	    redo      Reapply the last migration
+	    status    Show migration status
+	    up        Migrates the database to the most recent version available
+
+Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
+
+	development:
+	    dialect: sqlite3
+	    datasource: test.db
+	    dir: migrations/sqlite3
+
+	production:
+	    dialect: postgres
+	    datasource: dbname=myapp sslmode=disable
+	    dir: migrations/postgres
+	    table: migrations
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the -env flag (defaults to development).
+
+Use the --help flag in combination with any of the commands to get an overview of its usage:
+
+	$ sql-migrate up --help
+	Usage: sql-migrate up [options] ...
+
+	  Migrates the database to the most recent version available.
+
+	Options:
+
+	  -config=config.yml   Configuration file to use.
+	  -env="development"   Environment.
+	  -limit=0             Limit the number of migrations (0 = unlimited).
+	  -dryrun              Don't apply migrations, just print them.
+
+The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
+
+The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+
+Use the status command to see the state of the applied migrations:
+
+	$ sql-migrate status
+	+---------------+-----------------------------------------+
+	|   MIGRATION   |                 APPLIED                 |
+	+---------------+-----------------------------------------+
+	| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+	| 2_record.sql  | no                                      |
+	+---------------+-----------------------------------------+
+
+Library
+
+Import sql-migrate into your application:
+
+	import "github.com/rubenv/sql-migrate"
+
+Set up a source of migrations; this can be from memory, from a set of files, or from bindata (more on that later):
+
+	// Hardcoded strings in memory:
+	migrations := &migrate.MemoryMigrationSource{
+	    Migrations: []*migrate.Migration{
+	        &migrate.Migration{
+	            Id:   "123",
+	            Up:   []string{"CREATE TABLE people (id int)"},
+	            Down: []string{"DROP TABLE people"},
+	        },
+	    },
+	}
+
+	// OR: Read migrations from a folder:
+	migrations := &migrate.FileMigrationSource{
+	    Dir: "db/migrations",
+	}
+
+	// OR: Use migrations from bindata:
+	migrations := &migrate.AssetMigrationSource{
+	    Asset:    Asset,
+	    AssetDir: AssetDir,
+	    Dir:      "migrations",
+	}
+
+Then use the Exec function to upgrade your database:
+
+	db, err := sql.Open("sqlite3", filename)
+	if err != nil {
+	    // Handle errors!
+	}
+
+	n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+	if err != nil {
+	    // Handle errors!
+	}
+	fmt.Printf("Applied %d migrations!\n", n)
+
+Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails.
+
+The full set of capabilities can be found in the API docs below.
+
+Writing migrations
+
+Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
+
+	-- +migrate Up
+	-- SQL in section 'Up' is executed when this migration is applied
+	CREATE TABLE people (id int);
+
+
+	-- +migrate Down
+	-- SQL section 'Down' is executed when this migration is rolled back
+	DROP TABLE people;
+
+You can put multiple statements in each block, as long as you end them with a semicolon (;).
+
+If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries:
+
+	-- +migrate Up
+	CREATE TABLE people (id int);
+
+	-- +migrate StatementBegin
+	CREATE OR REPLACE FUNCTION do_something()
+	returns void AS $$
+	DECLARE
+	  create_query text;
+	BEGIN
+	  -- Do something here
+	END;
+	$$
+	language plpgsql;
+	-- +migrate StatementEnd
+
+	-- +migrate Down
+	DROP FUNCTION do_something();
+	DROP TABLE people;
+
+The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
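+
+For example, numeric prefixes sort numerically rather than lexicographically
+(the filenames here are illustrative):
+
+	1_initial.sql
+	2_record.sql
+	10_add_index.sql
+
+Here 10_add_index.sql is applied after 2_record.sql, and migrations without a
+numeric prefix sort after all numeric ones.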
+
+Embedding migrations with bindata
+
+If you like your Go applications self-contained (that is: a single binary), use bindata (https://github.com/jteeuwen/go-bindata) to embed the migration files.
+
+Just write your migration files as usual, as a set of SQL files in a folder.
+
+Then use bindata to generate a .go file with the migrations embedded:
+
+	go-bindata -pkg myapp -o bindata.go db/migrations/
+
+The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives).
+
+Use the AssetMigrationSource in your application to find the migrations:
+
+	migrations := &migrate.AssetMigrationSource{
+	    Asset:    Asset,
+	    AssetDir: AssetDir,
+	    Dir:      "db/migrations",
+	}
+
+Both Asset and AssetDir are functions provided by bindata.
+
+Then proceed as usual.
+
+Extending
+
+Adding a new migration source means implementing MigrationSource.
+
+	type MigrationSource interface {
+	    FindMigrations() ([]*Migration, error)
+	}
+
+The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field.
+*/
+package migrate
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/init_test.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/init_test.go
new file mode 100644
index 00000000..ce1a1716
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/init_test.go
@@ -0,0 +1,9 @@
+package migrate
+
+import (
+	"testing"
+
+	. "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate.go
new file mode 100644
index 00000000..4f36982f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate.go
@@ -0,0 +1,475 @@
+package migrate
+
+import (
+	"bytes"
+	"database/sql"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/rubenv/sql-migrate/sqlparse"
+	"gopkg.in/gorp.v1"
+)
+
+type MigrationDirection int
+
+const (
+	Up MigrationDirection = iota
+	Down
+)
+
+var tableName = "gorp_migrations"
+var schemaName = ""
+var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`)
+
+// Set the name of the table used to store migration info.
+//
+// Should be called before any other call such as (Exec, ExecMax, ...).
+func SetTable(name string) {
+	if name != "" {
+		tableName = name
+	}
+}
+
+// SetSchema sets the name of the schema used to qualify the migration table name.
+func SetSchema(name string) {
+	if name != "" {
+		schemaName = name
+	}
+}
+
+func getTableName() string {
+	t := tableName
+	if schemaName != "" {
+		t = fmt.Sprintf("%s.%s", schemaName, t)
+	}
+
+	return t
+}
+
+type Migration struct {
+	Id   string
+	Up   []string
+	Down []string
+}
+
+func (m Migration) Less(other *Migration) bool {
+	switch {
+	case m.isNumeric() && other.isNumeric():
+		return m.VersionInt() < other.VersionInt()
+	case m.isNumeric() && !other.isNumeric():
+		return true
+	case !m.isNumeric() && other.isNumeric():
+		return false
+	default:
+		return m.Id < other.Id
+	}
+}
+
+func (m Migration) isNumeric() bool {
+	return len(m.NumberPrefixMatches()) > 0
+}
+
+func (m Migration) NumberPrefixMatches() []string {
+	return numberPrefixRegex.FindStringSubmatch(m.Id)
+}
+
+func (m Migration) VersionInt() int64 {
+	v := m.NumberPrefixMatches()[1]
+	value, err := strconv.ParseInt(v, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
+	}
+	return value
+}
+
+type PlannedMigration struct {
+	*Migration
+	Queries []string
+}
+
+type byId []*Migration
+
+func (b byId) Len() int           { return len(b) }
+func (b byId) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
+
+type MigrationRecord struct {
+	Id        string    `db:"id"`
+	AppliedAt time.Time `db:"applied_at"`
+}
+
+var MigrationDialects = map[string]gorp.Dialect{
+	"sqlite3":  gorp.SqliteDialect{},
+	"postgres": gorp.PostgresDialect{},
+	"mysql":    gorp.MySQLDialect{"InnoDB", "UTF8"},
+	"mssql":    gorp.SqlServerDialect{},
+	"oci8":     gorp.OracleDialect{},
+}
+
+type MigrationSource interface {
+	// Finds the migrations.
+	//
+	// The resulting slice of migrations should be sorted by Id.
+	FindMigrations() ([]*Migration, error)
+}
+
+// A hardcoded set of migrations, in-memory.
+type MemoryMigrationSource struct {
+	Migrations []*Migration
+}
+
+var _ MigrationSource = (*MemoryMigrationSource)(nil)
+
+func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
+	// Make sure migrations are sorted
+	sort.Sort(byId(m.Migrations))
+
+	return m.Migrations, nil
+}
+
+// A set of migrations loaded from a directory.
+type FileMigrationSource struct {
+	Dir string
+}
+
+var _ MigrationSource = (*FileMigrationSource)(nil)
+
+func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+
+	file, err := os.Open(f.Dir)
+	if err != nil {
+		return nil, err
+	}
+
+	files, err := file.Readdir(0)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, info := range files {
+		if strings.HasSuffix(info.Name(), ".sql") {
+			file, err := os.Open(path.Join(f.Dir, info.Name()))
+			if err != nil {
+				return nil, err
+			}
+
+			migration, err := ParseMigration(info.Name(), file)
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+// Migrations from a bindata asset set.
+type AssetMigrationSource struct {
+	// Asset should return content of file in path if exists
+	Asset func(path string) ([]byte, error)
+
+	// AssetDir should return list of files in the path
+	AssetDir func(path string) ([]string, error)
+
+	// Path in the bindata to use.
+	Dir string
+}
+
+var _ MigrationSource = (*AssetMigrationSource)(nil)
+
+func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+
+	files, err := a.AssetDir(a.Dir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, name := range files {
+		if strings.HasSuffix(name, ".sql") {
+			file, err := a.Asset(path.Join(a.Dir, name))
+			if err != nil {
+				return nil, err
+			}
+
+			migration, err := ParseMigration(name, bytes.NewReader(file))
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+// Migration parsing
+func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
+	m := &Migration{
+		Id: id,
+	}
+
+	up, err := sqlparse.SplitSQLStatements(r, true)
+	if err != nil {
+		return nil, err
+	}
+
+	down, err := sqlparse.SplitSQLStatements(r, false)
+	if err != nil {
+		return nil, err
+	}
+
+	m.Up = up
+	m.Down = down
+
+	return m, nil
+}
+
+// Execute a set of migrations
+//
+// Returns the number of applied migrations.
+func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) {
+	return ExecMax(db, dialect, m, dir, 0)
+}
+
+// Execute a set of migrations
+//
+// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec).
+//
+// Returns the number of applied migrations.
+func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) {
+	migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max)
+	if err != nil {
+		return 0, err
+	}
+
+	// Apply migrations
+	applied := 0
+	for _, migration := range migrations {
+		trans, err := dbMap.Begin()
+		if err != nil {
+			return applied, err
+		}
+
+		for _, stmt := range migration.Queries {
+			_, err := trans.Exec(stmt)
+			if err != nil {
+				trans.Rollback()
+				return applied, err
+			}
+		}
+
+		if dir == Up {
+			err = trans.Insert(&MigrationRecord{
+				Id:        migration.Id,
+				AppliedAt: time.Now(),
+			})
+			if err != nil {
+				return applied, err
+			}
+		} else if dir == Down {
+			_, err := trans.Delete(&MigrationRecord{
+				Id: migration.Id,
+			})
+			if err != nil {
+				return applied, err
+			}
+		} else {
+			panic("Not possible")
+		}
+
+		err = trans.Commit()
+		if err != nil {
+			return applied, err
+		}
+
+		applied++
+	}
+
+	return applied, nil
+}
+
+// Plan a migration.
+func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) {
+	dbMap, err := getMigrationDbMap(db, dialect)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	migrations, err := m.FindMigrations()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var migrationRecords []MigrationRecord
+	_, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", getTableName()))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Sort migrations that have been run by Id.
+	var existingMigrations []*Migration
+	for _, migrationRecord := range migrationRecords {
+		existingMigrations = append(existingMigrations, &Migration{
+			Id: migrationRecord.Id,
+		})
+	}
+	sort.Sort(byId(existingMigrations))
+
+	// Get last migration that was run
+	record := &Migration{}
+	if len(existingMigrations) > 0 {
+		record = existingMigrations[len(existingMigrations)-1]
+	}
+
+	result := make([]*PlannedMigration, 0)
+
+	// Add missing migrations up to the last run migration.
+	// This can happen, for example, when merges happened.
+	if len(existingMigrations) > 0 {
+		result = append(result, ToCatchup(migrations, existingMigrations, record)...)
+	}
+
+	// Figure out which migrations to apply
+	toApply := ToApply(migrations, record.Id, dir)
+	toApplyCount := len(toApply)
+	if max > 0 && max < toApplyCount {
+		toApplyCount = max
+	}
+	for _, v := range toApply[0:toApplyCount] {
+
+		if dir == Up {
+			result = append(result, &PlannedMigration{
+				Migration: v,
+				Queries:   v.Up,
+			})
+		} else if dir == Down {
+			result = append(result, &PlannedMigration{
+				Migration: v,
+				Queries:   v.Down,
+			})
+		}
+	}
+
+	return result, dbMap, nil
+}
+
+// Filter a slice of migrations into ones that should be applied.
+func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration {
+	var index = -1
+	if current != "" {
+		for index < len(migrations)-1 {
+			index++
+			if migrations[index].Id == current {
+				break
+			}
+		}
+	}
+
+	if direction == Up {
+		return migrations[index+1:]
+	} else if direction == Down {
+		if index == -1 {
+			return []*Migration{}
+		}
+
+		// Add in reverse order
+		toApply := make([]*Migration, index+1)
+		for i := 0; i < index+1; i++ {
+			toApply[index-i] = migrations[i]
+		}
+		return toApply
+	}
+
+	panic("Not possible")
+}
+
+func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration {
+	missing := make([]*PlannedMigration, 0)
+	for _, migration := range migrations {
+		found := false
+		for _, existing := range existingMigrations {
+			if existing.Id == migration.Id {
+				found = true
+				break
+			}
+		}
+		if !found && migration.Less(lastRun) {
+			missing = append(missing, &PlannedMigration{Migration: migration, Queries: migration.Up})
+		}
+	}
+	return missing
+}
+
+func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) {
+	dbMap, err := getMigrationDbMap(db, dialect)
+	if err != nil {
+		return nil, err
+	}
+
+	var records []*MigrationRecord
+	query := fmt.Sprintf("SELECT * FROM %s ORDER BY id ASC", getTableName())
+	_, err = dbMap.Select(&records, query)
+	if err != nil {
+		return nil, err
+	}
+
+	return records, nil
+}
+
+func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) {
+	d, ok := MigrationDialects[dialect]
+	if !ok {
+		return nil, fmt.Errorf("Unknown dialect: %s", dialect)
+	}
+
+	// When using the mysql driver, make sure that the parseTime option is
+	// configured, otherwise it won't map time columns to time.Time. See
+	// https://github.com/rubenv/sql-migrate/issues/2
+	if dialect == "mysql" {
+		var out *time.Time
+		err := db.QueryRow("SELECT NOW()").Scan(&out)
+		if err != nil {
+			if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" {
+				return nil, errors.New(`Cannot parse dates.
+
+Make sure that the parseTime option is supplied to your database connection.
+Check https://github.com/go-sql-driver/mysql#parsetime for more info.`)
+			} else {
+				return nil, err
+			}
+		}
+	}
+
+	// Create migration database map
+	dbMap := &gorp.DbMap{Db: db, Dialect: d}
+	dbMap.AddTableWithNameAndSchema(MigrationRecord{}, schemaName, tableName).SetKeys(false, "Id")
+	//dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds))
+
+	err := dbMap.CreateTablesIfNotExists()
+	if err != nil {
+		return nil, err
+	}
+
+	return dbMap, nil
+}
+
+// TODO: Run migration + record insert in transaction.
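Tying the exported pieces of migrate.go together, here is a short consumer-side sketch: plan first (a dry run), then apply at most one migration via ExecMax. The function name, file paths and the sqlite datasource are illustrative assumptions; error handling is abbreviated.

```go
package myapp

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
	migrate "github.com/rubenv/sql-migrate"
)

func applyAtMostOne() {
	// Illustrative datasource; any dialect in MigrationDialects works.
	db, err := sql.Open("sqlite3", "app.db")
	if err != nil {
		log.Fatal(err)
	}

	source := migrate.FileMigrationSource{Dir: "db/migrations"}

	// Dry run: PlanMigration reports what would be applied, without applying.
	planned, _, err := migrate.PlanMigration(db, "sqlite3", source, migrate.Up, 1)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range planned {
		fmt.Println("would apply:", m.Id)
	}

	// Apply at most one pending migration (0 would mean no limit, as in Exec).
	n, err := migrate.ExecMax(db, "sqlite3", source, migrate.Up, 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("applied %d migration(s)\n", n)
}
```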
diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate_test.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate_test.go new file mode 100644 index 00000000..cbe72495 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/migrate_test.go @@ -0,0 +1,357 @@ +package migrate + +import ( + "database/sql" + "os" + + _ "github.com/mattn/go-sqlite3" + . "gopkg.in/check.v1" + "gopkg.in/gorp.v1" +) + +var filename = "/tmp/sql-migrate-sqlite.db" +var sqliteMigrations = []*Migration{ + &Migration{ + Id: "123", + Up: []string{"CREATE TABLE people (id int)"}, + Down: []string{"DROP TABLE people"}, + }, + &Migration{ + Id: "124", + Up: []string{"ALTER TABLE people ADD COLUMN first_name text"}, + Down: []string{"SELECT 0"}, // Not really supported + }, +} + +type SqliteMigrateSuite struct { + Db *sql.DB + DbMap *gorp.DbMap +} + +var _ = Suite(&SqliteMigrateSuite{}) + +func (s *SqliteMigrateSuite) SetUpTest(c *C) { + db, err := sql.Open("sqlite3", filename) + c.Assert(err, IsNil) + + s.Db = db + s.DbMap = &gorp.DbMap{Db: db, Dialect: &gorp.SqliteDialect{}} +} + +func (s *SqliteMigrateSuite) TearDownTest(c *C) { + err := os.Remove(filename) + c.Assert(err, IsNil) +} + +func (s *SqliteMigrateSuite) TestRunMigration(c *C) { + migrations := &MemoryMigrationSource{ + Migrations: sqliteMigrations[:1], + } + + // Executes one migration + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) + + // Can use table now + _, err = s.DbMap.Exec("SELECT * FROM people") + c.Assert(err, IsNil) + + // Shouldn't apply migration again + n, err = Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 0) +} + +func (s *SqliteMigrateSuite) TestMigrateMultiple(c *C) { + migrations := &MemoryMigrationSource{ + Migrations: sqliteMigrations[:2], + } + + // Executes two migrations + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) + + // Can use column now + _, err = s.DbMap.Exec("SELECT first_name FROM people") + c.Assert(err, IsNil) +} + +func (s *SqliteMigrateSuite) TestMigrateIncremental(c *C) { + migrations := &MemoryMigrationSource{ + Migrations: sqliteMigrations[:1], + } + + // Executes one migration + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) + + // Execute a new migration + migrations = &MemoryMigrationSource{ + Migrations: sqliteMigrations[:2], + } + n, err = Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) + + // Can use column now + _, err = s.DbMap.Exec("SELECT first_name FROM people") + c.Assert(err, IsNil) +} + +func (s *SqliteMigrateSuite) TestFileMigrate(c *C) { + migrations := &FileMigrationSource{ + Dir: "test-migrations", + } + + // Executes two migrations + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) + + // Has data + id, err := s.DbMap.SelectInt("SELECT id FROM people") + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(1)) +} + +func (s *SqliteMigrateSuite) TestAssetMigrate(c *C) { + migrations := &AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "test-migrations", + } + + // Executes two migrations + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) + + // Has data + id, err := s.DbMap.SelectInt("SELECT id FROM people") + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(1)) +} + +func (s *SqliteMigrateSuite) TestMigrateMax(c *C) { + 
migrations := &FileMigrationSource{ + Dir: "test-migrations", + } + + // Executes one migration + n, err := ExecMax(s.Db, "sqlite3", migrations, Up, 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) + + id, err := s.DbMap.SelectInt("SELECT COUNT(*) FROM people") + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(0)) +} + +func (s *SqliteMigrateSuite) TestMigrateDown(c *C) { + migrations := &FileMigrationSource{ + Dir: "test-migrations", + } + + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) + + // Has data + id, err := s.DbMap.SelectInt("SELECT id FROM people") + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(1)) + + // Undo the last one + n, err = ExecMax(s.Db, "sqlite3", migrations, Down, 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) + + // No more data + id, err = s.DbMap.SelectInt("SELECT COUNT(*) FROM people") + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(0)) + + // Remove the table. + n, err = ExecMax(s.Db, "sqlite3", migrations, Down, 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) + + // Cannot query it anymore + _, err = s.DbMap.SelectInt("SELECT COUNT(*) FROM people") + c.Assert(err, Not(IsNil)) + + // Nothing left to do. + n, err = ExecMax(s.Db, "sqlite3", migrations, Down, 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, 0) +} + +func (s *SqliteMigrateSuite) TestMigrateDownFull(c *C) { + migrations := &FileMigrationSource{ + Dir: "test-migrations", + } + + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) + + // Has data + id, err := s.DbMap.SelectInt("SELECT id FROM people") + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(1)) + + // Undo the last one + n, err = Exec(s.Db, "sqlite3", migrations, Down) + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) + + // Cannot query it anymore + _, err = s.DbMap.SelectInt("SELECT COUNT(*) FROM people") + c.Assert(err, Not(IsNil)) + + // Nothing left to do. + n, err = Exec(s.Db, "sqlite3", migrations, Down) + c.Assert(err, IsNil) + c.Assert(n, Equals, 0) +} + +func (s *SqliteMigrateSuite) TestMigrateTransaction(c *C) { + migrations := &MemoryMigrationSource{ + Migrations: []*Migration{ + sqliteMigrations[0], + sqliteMigrations[1], + &Migration{ + Id: "125", + Up: []string{"INSERT INTO people (id, first_name) VALUES (1, 'Test')", "SELECT fail"}, + Down: []string{}, // Not important here + }, + }, + } + + // Should fail, transaction should roll back the INSERT. 
+ n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, Not(IsNil)) + c.Assert(n, Equals, 2) + + // INSERT should be rolled back + count, err := s.DbMap.SelectInt("SELECT COUNT(*) FROM people") + c.Assert(err, IsNil) + c.Assert(count, Equals, int64(0)) +} + +func (s *SqliteMigrateSuite) TestPlanMigration(c *C) { + migrations := &MemoryMigrationSource{ + Migrations: []*Migration{ + &Migration{ + Id: "1_create_table.sql", + Up: []string{"CREATE TABLE people (id int)"}, + Down: []string{"DROP TABLE people"}, + }, + &Migration{ + Id: "2_alter_table.sql", + Up: []string{"ALTER TABLE people ADD COLUMN first_name text"}, + Down: []string{"SELECT 0"}, // Not really supported + }, + &Migration{ + Id: "10_add_last_name.sql", + Up: []string{"ALTER TABLE people ADD COLUMN last_name text"}, + Down: []string{"ALTER TABLE people DROP COLUMN last_name"}, + }, + }, + } + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + + migrations.Migrations = append(migrations.Migrations, &Migration{ + Id: "11_add_middle_name.sql", + Up: []string{"ALTER TABLE people ADD COLUMN middle_name text"}, + Down: []string{"ALTER TABLE people DROP COLUMN middle_name"}, + }) + + plannedMigrations, _, err := PlanMigration(s.Db, "sqlite3", migrations, Up, 0) + c.Assert(err, IsNil) + c.Assert(plannedMigrations, HasLen, 1) + c.Assert(plannedMigrations[0].Migration, Equals, migrations.Migrations[3]) + + plannedMigrations, _, err = PlanMigration(s.Db, "sqlite3", migrations, Down, 0) + c.Assert(err, IsNil) + c.Assert(plannedMigrations, HasLen, 3) + c.Assert(plannedMigrations[0].Migration, Equals, migrations.Migrations[2]) + c.Assert(plannedMigrations[1].Migration, Equals, migrations.Migrations[1]) + c.Assert(plannedMigrations[2].Migration, Equals, migrations.Migrations[0]) +} + +func (s *SqliteMigrateSuite) TestPlanMigrationWithHoles(c *C) { + up := "SELECT 0" + down := "SELECT 1" + migrations := &MemoryMigrationSource{ + Migrations: []*Migration{ + &Migration{ + Id: "1", + Up: []string{up}, + Down: []string{down}, + }, + &Migration{ + Id: "3", + Up: []string{up}, + Down: []string{down}, + }, + }, + } + n, err := Exec(s.Db, "sqlite3", migrations, Up) + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) + + migrations.Migrations = append(migrations.Migrations, &Migration{ + Id: "2", + Up: []string{up}, + Down: []string{down}, + }) + + migrations.Migrations = append(migrations.Migrations, &Migration{ + Id: "4", + Up: []string{up}, + Down: []string{down}, + }) + + migrations.Migrations = append(migrations.Migrations, &Migration{ + Id: "5", + Up: []string{up}, + Down: []string{down}, + }) + + // apply all the missing migrations + plannedMigrations, _, err := PlanMigration(s.Db, "sqlite3", migrations, Up, 0) + c.Assert(err, IsNil) + c.Assert(plannedMigrations, HasLen, 3) + c.Assert(plannedMigrations[0].Migration.Id, Equals, "2") + c.Assert(plannedMigrations[0].Queries[0], Equals, up) + c.Assert(plannedMigrations[1].Migration.Id, Equals, "4") + c.Assert(plannedMigrations[1].Queries[0], Equals, up) + c.Assert(plannedMigrations[2].Migration.Id, Equals, "5") + c.Assert(plannedMigrations[2].Queries[0], Equals, up) + + // first catch up to current target state 123, then migrate down 1 step to 12 + plannedMigrations, _, err = PlanMigration(s.Db, "sqlite3", migrations, Down, 1) + c.Assert(err, IsNil) + c.Assert(plannedMigrations, HasLen, 2) + c.Assert(plannedMigrations[0].Migration.Id, Equals, "2") + c.Assert(plannedMigrations[0].Queries[0], Equals, up) + c.Assert(plannedMigrations[1].Migration.Id, 
Equals, "3") + c.Assert(plannedMigrations[1].Queries[0], Equals, down) + + // first catch up to current target state 123, then migrate down 2 steps to 1 + plannedMigrations, _, err = PlanMigration(s.Db, "sqlite3", migrations, Down, 2) + c.Assert(err, IsNil) + c.Assert(plannedMigrations, HasLen, 3) + c.Assert(plannedMigrations[0].Migration.Id, Equals, "2") + c.Assert(plannedMigrations[0].Queries[0], Equals, up) + c.Assert(plannedMigrations[1].Migration.Id, Equals, "3") + c.Assert(plannedMigrations[1].Queries[0], Equals, down) + c.Assert(plannedMigrations[2].Migration.Id, Equals, "2") + c.Assert(plannedMigrations[2].Queries[0], Equals, down) +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sort_test.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sort_test.go new file mode 100644 index 00000000..b911260c --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sort_test.go @@ -0,0 +1,34 @@ +package migrate + +import ( + "sort" + . "gopkg.in/check.v1" +) + +type SortSuite struct{} + +var _ = Suite(&SortSuite{}) + +func (s *SortSuite) TestSortMigrations(c *C) { + var migrations = byId([]*Migration{ + &Migration{Id: "10_abc", Up: nil, Down: nil}, + &Migration{Id: "120_cde", Up: nil, Down: nil}, + &Migration{Id: "1_abc", Up: nil, Down: nil}, + &Migration{Id: "efg", Up: nil, Down: nil}, + &Migration{Id: "2_cde", Up: nil, Down: nil}, + &Migration{Id: "35_cde", Up: nil, Down: nil}, + &Migration{Id: "3_efg", Up: nil, Down: nil}, + &Migration{Id: "4_abc", Up: nil, Down: nil}, + }) + + sort.Sort(migrations) + c.Assert(migrations, HasLen, 8) + c.Assert(migrations[0].Id, Equals, "1_abc") + c.Assert(migrations[1].Id, Equals, "2_cde") + c.Assert(migrations[2].Id, Equals, "3_efg") + c.Assert(migrations[3].Id, Equals, "4_abc") + c.Assert(migrations[4].Id, Equals, "10_abc") + c.Assert(migrations[5].Id, Equals, "35_cde") + c.Assert(migrations[6].Id, Equals, "120_cde") + c.Assert(migrations[7].Id, Equals, "efg") +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_common.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_common.go new file mode 100644 index 00000000..e5949c6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_common.go @@ -0,0 +1,63 @@ +package main + +import ( + "fmt" + + "github.com/rubenv/sql-migrate" +) + +func ApplyMigrations(dir migrate.MigrationDirection, dryrun bool, limit int) error { + env, err := GetEnvironment() + if err != nil { + return fmt.Errorf("Could not parse config: %s", err) + } + + db, dialect, err := GetConnection(env) + if err != nil { + return err + } + + source := migrate.FileMigrationSource{ + Dir: env.Dir, + } + + if dryrun { + migrations, _, err := migrate.PlanMigration(db, dialect, source, dir, limit) + if err != nil { + return fmt.Errorf("Cannot plan migration: %s", err) + } + + for _, m := range migrations { + PrintMigration(m, dir) + } + } else { + n, err := migrate.ExecMax(db, dialect, source, dir, limit) + if err != nil { + return fmt.Errorf("Migration failed: %s", err) + } + + if n == 1 { + ui.Output("Applied 1 migration") + } else { + ui.Output(fmt.Sprintf("Applied %d migrations", n)) + } + } + + return nil +} + +func PrintMigration(m *migrate.PlannedMigration, dir migrate.MigrationDirection) { + if dir == migrate.Up { + ui.Output(fmt.Sprintf("==> Would apply migration %s (up)", m.Id)) + for _, q := range m.Up { + ui.Output(q) + } + } else if dir == migrate.Down { + ui.Output(fmt.Sprintf("==> Would apply migration 
%s (down)", m.Id)) + for _, q := range m.Down { + ui.Output(q) + } + } else { + panic("Not reached") + } +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_down.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_down.go new file mode 100644 index 00000000..232eac40 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_down.go @@ -0,0 +1,55 @@ +package main + +import ( + "flag" + "strings" + + "github.com/rubenv/sql-migrate" +) + +type DownCommand struct { +} + +func (c *DownCommand) Help() string { + helpText := ` +Usage: sql-migrate down [options] ... + + Undo a database migration. + +Options: + + -config=dbconfig.yml Configuration file to use. + -env="development" Environment. + -limit=1 Limit the number of migrations (0 = unlimited). + -dryrun Don't apply migrations, just print them. + +` + return strings.TrimSpace(helpText) +} + +func (c *DownCommand) Synopsis() string { + return "Undo a database migration" +} + +func (c *DownCommand) Run(args []string) int { + var limit int + var dryrun bool + + cmdFlags := flag.NewFlagSet("down", flag.ContinueOnError) + cmdFlags.Usage = func() { ui.Output(c.Help()) } + cmdFlags.IntVar(&limit, "limit", 1, "Max number of migrations to apply.") + cmdFlags.BoolVar(&dryrun, "dryrun", false, "Don't apply migrations, just print them.") + ConfigFlags(cmdFlags) + + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + err := ApplyMigrations(migrate.Down, dryrun, limit) + if err != nil { + ui.Error(err.Error()) + return 1 + } + + return 0 +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_redo.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_redo.go new file mode 100644 index 00000000..ccc760fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_redo.go @@ -0,0 +1,88 @@ +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/rubenv/sql-migrate" +) + +type RedoCommand struct { +} + +func (c *RedoCommand) Help() string { + helpText := ` +Usage: sql-migrate redo [options] ... + + Reapply the last migration. + +Options: + + -config=dbconfig.yml Configuration file to use. + -env="development" Environment. + -dryrun Don't apply migrations, just print them. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *RedoCommand) Synopsis() string { + return "Reapply the last migration" +} + +func (c *RedoCommand) Run(args []string) int { + var dryrun bool + + cmdFlags := flag.NewFlagSet("redo", flag.ContinueOnError) + cmdFlags.Usage = func() { ui.Output(c.Help()) } + cmdFlags.BoolVar(&dryrun, "dryrun", false, "Don't apply migrations, just print them.") + ConfigFlags(cmdFlags) + + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + env, err := GetEnvironment() + if err != nil { + ui.Error(fmt.Sprintf("Could not parse config: %s", err)) + return 1 + } + + db, dialect, err := GetConnection(env) + if err != nil { + ui.Error(err.Error()) + return 1 + } + + source := migrate.FileMigrationSource{ + Dir: env.Dir, + } + + migrations, _, err := migrate.PlanMigration(db, dialect, source, migrate.Down, 1) + if len(migrations) == 0 { + ui.Output("Nothing to do!") + return 0 + } + + if dryrun { + PrintMigration(migrations[0], migrate.Down) + PrintMigration(migrations[0], migrate.Up) + } else { + _, err := migrate.ExecMax(db, dialect, source, migrate.Down, 1) + if err != nil { + ui.Error(fmt.Sprintf("Migration (down) failed: %s", err)) + return 1 + } + + _, err = migrate.ExecMax(db, dialect, source, migrate.Up, 1) + if err != nil { + ui.Error(fmt.Sprintf("Migration (up) failed: %s", err)) + return 1 + } + + ui.Output(fmt.Sprintf("Reapplied migration %s.", migrations[0].Id)) + } + + return 0 +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_status.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_status.go new file mode 100644 index 00000000..bc2ca70d --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_status.go @@ -0,0 +1,113 @@ +package main + +import ( + "flag" + "fmt" + "os" + "strings" + "time" + + "github.com/olekukonko/tablewriter" + "github.com/rubenv/sql-migrate" +) + +type StatusCommand struct { +} + +func (c *StatusCommand) Help() string { + helpText := ` +Usage: sql-migrate status [options] ... + + Show migration status. + +Options: + + -config=dbconfig.yml Configuration file to use. + -env="development" Environment. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *StatusCommand) Synopsis() string { + return "Show migration status" +} + +func (c *StatusCommand) Run(args []string) int { + cmdFlags := flag.NewFlagSet("status", flag.ContinueOnError) + cmdFlags.Usage = func() { ui.Output(c.Help()) } + ConfigFlags(cmdFlags) + + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + env, err := GetEnvironment() + if err != nil { + ui.Error(fmt.Sprintf("Could not parse config: %s", err)) + return 1 + } + + db, dialect, err := GetConnection(env) + if err != nil { + ui.Error(err.Error()) + return 1 + } + + source := migrate.FileMigrationSource{ + Dir: env.Dir, + } + migrations, err := source.FindMigrations() + if err != nil { + ui.Error(err.Error()) + return 1 + } + + records, err := migrate.GetMigrationRecords(db, dialect) + if err != nil { + ui.Error(err.Error()) + return 1 + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Migration", "Applied"}) + table.SetColWidth(60) + + rows := make(map[string]*statusRow) + + for _, m := range migrations { + rows[m.Id] = &statusRow{ + Id: m.Id, + Migrated: false, + } + } + + for _, r := range records { + rows[r.Id].Migrated = true + rows[r.Id].AppliedAt = r.AppliedAt + } + + for _, m := range migrations { + if rows[m.Id].Migrated { + table.Append([]string{ + m.Id, + rows[m.Id].AppliedAt.String(), + }) + } else { + table.Append([]string{ + m.Id, + "no", + }) + } + } + + table.Render() + + return 0 +} + +type statusRow struct { + Id string + Migrated bool + AppliedAt time.Time +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_up.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_up.go new file mode 100644 index 00000000..9e5a291f --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/command_up.go @@ -0,0 +1,55 @@ +package main + +import ( + "flag" + "strings" + + "github.com/rubenv/sql-migrate" +) + +type UpCommand struct { +} + +func (c *UpCommand) Help() string { + helpText := ` +Usage: sql-migrate up [options] ... + + Migrates the database to the most recent version available. + +Options: + + -config=dbconfig.yml Configuration file to use. + -env="development" Environment. + -limit=0 Limit the number of migrations (0 = unlimited). + -dryrun Don't apply migrations, just print them. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *UpCommand) Synopsis() string { + return "Migrates the database to the most recent version available" +} + +func (c *UpCommand) Run(args []string) int { + var limit int + var dryrun bool + + cmdFlags := flag.NewFlagSet("up", flag.ContinueOnError) + cmdFlags.Usage = func() { ui.Output(c.Help()) } + cmdFlags.IntVar(&limit, "limit", 0, "Max number of migrations to apply.") + cmdFlags.BoolVar(&dryrun, "dryrun", false, "Don't apply migrations, just print them.") + ConfigFlags(cmdFlags) + + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + err := ApplyMigrations(migrate.Up, dryrun, limit) + if err != nil { + ui.Error(err.Error()) + return 1 + } + + return 0 +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/config.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/config.go new file mode 100644 index 00000000..d01212d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/config.go @@ -0,0 +1,103 @@ +package main + +import ( + "database/sql" + "errors" + "flag" + "fmt" + "io/ioutil" + + "github.com/rubenv/sql-migrate" + "gopkg.in/gorp.v1" + "gopkg.in/yaml.v1" + + _ "github.com/go-sql-driver/mysql" + _ "github.com/lib/pq" + _ "github.com/mattn/go-sqlite3" +) + +var dialects = map[string]gorp.Dialect{ + "sqlite3": gorp.SqliteDialect{}, + "postgres": gorp.PostgresDialect{}, + "mysql": gorp.MySQLDialect{"InnoDB", "UTF8"}, +} + +var ConfigFile string +var ConfigEnvironment string + +func ConfigFlags(f *flag.FlagSet) { + f.StringVar(&ConfigFile, "config", "dbconfig.yml", "Configuration file to use.") + f.StringVar(&ConfigEnvironment, "env", "development", "Environment to use.") +} + +type Environment struct { + Dialect string `yaml:"dialect"` + DataSource string `yaml:"datasource"` + Dir string `yaml:"dir"` + TableName string `yaml:"table"` + SchemaName string `yaml:"schema"` +} + +func ReadConfig() (map[string]*Environment, error) { + file, err := ioutil.ReadFile(ConfigFile) + if err != nil { + return nil, err + } + + config := make(map[string]*Environment) + err = yaml.Unmarshal(file, config) + if err != nil { + return nil, err + } + + return config, nil +} + +func GetEnvironment() (*Environment, error) { + config, err := ReadConfig() + if err != nil { + return nil, err + } + + env := config[ConfigEnvironment] + if env == nil { + return nil, errors.New("No environment: " + ConfigEnvironment) + } + + if env.Dialect == "" { + return nil, errors.New("No dialect specified") + } + + if env.DataSource == "" { + return nil, errors.New("No data source specified") + } + + if env.Dir == "" { + env.Dir = "migrations" + } + + if env.TableName != "" { + migrate.SetTable(env.TableName) + } + + if env.SchemaName != "" { + migrate.SetSchema(env.SchemaName) + } + + return env, nil +} + +func GetConnection(env *Environment) (*sql.DB, string, error) { + db, err := sql.Open(env.Dialect, env.DataSource) + if err != nil { + return nil, "", fmt.Errorf("Cannot connect to database: %s", err) + } + + // Make sure we only accept dialects that were compiled in. 
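+	// (Note: sql.Open does not actually dial the database, so a bad
+	// DataSource is typically only reported once the pool is first used;
+	// only unknown driver names fail at this point.)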
+ _, exists := dialects[env.Dialect] + if !exists { + return nil, "", fmt.Errorf("Unsupported dialect: %s", env.Dialect) + } + + return db, env.Dialect, nil +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main.go new file mode 100644 index 00000000..c26afca2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "os" + + "github.com/mitchellh/cli" +) + +func main() { + os.Exit(realMain()) +} + +var ui cli.Ui + +func realMain() int { + ui = &cli.BasicUi{Writer: os.Stdout} + + cli := &cli.CLI{ + Args: os.Args[1:], + Commands: map[string]cli.CommandFactory{ + "up": func() (cli.Command, error) { + return &UpCommand{}, nil + }, + "down": func() (cli.Command, error) { + return &DownCommand{}, nil + }, + "redo": func() (cli.Command, error) { + return &RedoCommand{}, nil + }, + "status": func() (cli.Command, error) { + return &StatusCommand{}, nil + }, + }, + HelpFunc: cli.BasicHelpFunc("sql-migrate"), + Version: "1.0.0", + } + + exitCode, err := cli.Run() + if err != nil { + fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err.Error()) + return 1 + } + + return exitCode +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main_test.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main_test.go new file mode 100644 index 00000000..06ab7d0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/main_test.go @@ -0,0 +1 @@ +package main diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/mssql.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/mssql.go new file mode 100644 index 00000000..2e7918af --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sql-migrate/mssql.go @@ -0,0 +1,12 @@ +// +build go1.3 + +package main + +import ( + _ "github.com/denisenkom/go-mssqldb" + "gopkg.in/gorp.v1" +) + +func init() { + dialects["mssql"] = gorp.SqlServerDialect{} +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/README.md b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/README.md new file mode 100644 index 00000000..a41706ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/README.md @@ -0,0 +1,28 @@ +# SQL migration parser + +Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser. + +## License + + (The MIT License) + + Copyright (C) 2014 by Ruben Vermeersch + Copyright (C) 2012-2014 by Liam Staskawicz + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go new file mode 100644 index 00000000..df95011e --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go @@ -0,0 +1,128 @@ +package sqlparse + +import ( + "bufio" + "bytes" + "errors" + "io" + + "strings" +) + +const sqlCmdPrefix = "-- +migrate " + +// Checks the line to see if the line has a statement-ending semicolon +// or if the line contains a double-dash comment. +func endsWithSemicolon(line string) bool { + + prev := "" + scanner := bufio.NewScanner(strings.NewReader(line)) + scanner.Split(bufio.ScanWords) + + for scanner.Scan() { + word := scanner.Text() + if strings.HasPrefix(word, "--") { + break + } + prev = word + } + + return strings.HasSuffix(prev, ";") +} + +// Split the given sql script into individual statements. +// +// The base case is to simply split on semicolons, as these +// naturally terminate a statement. +// +// However, more complex cases like pl/pgsql can have semicolons +// within a statement. For these cases, we provide the explicit annotations +// 'StatementBegin' and 'StatementEnd' to allow the script to +// tell us to ignore semicolons. +func SplitSQLStatements(r io.ReadSeeker, direction bool) ([]string, error) { + _, err := r.Seek(0, 0) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + scanner := bufio.NewScanner(r) + + // track the count of each section + // so we can diagnose scripts with no annotations + upSections := 0 + downSections := 0 + + statementEnded := false + ignoreSemicolons := false + directionIsActive := false + + stmts := make([]string, 0) + + for scanner.Scan() { + + line := scanner.Text() + + // handle any migrate-specific commands + if strings.HasPrefix(line, sqlCmdPrefix) { + cmd := strings.TrimSpace(line[len(sqlCmdPrefix):]) + switch cmd { + case "Up": + directionIsActive = (direction == true) + upSections++ + break + + case "Down": + directionIsActive = (direction == false) + downSections++ + break + + case "StatementBegin": + if directionIsActive { + ignoreSemicolons = true + } + break + + case "StatementEnd": + if directionIsActive { + statementEnded = (ignoreSemicolons == true) + ignoreSemicolons = false + } + break + } + } + + if !directionIsActive { + continue + } + + if _, err := buf.WriteString(line + "\n"); err != nil { + return nil, err + } + + // Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement + // Lines that end with semicolon that are in a statement block + // do not conclude statement. + if (!ignoreSemicolons && endsWithSemicolon(line)) || statementEnded { + statementEnded = false + stmts = append(stmts, buf.String()) + buf.Reset() + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // diagnose likely migration script errors + if ignoreSemicolons { + return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'") + } + + if upSections == 0 && downSections == 0 { + return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed. 
+ See https://github.com/rubenv/sql-migrate for details.`) + } + + return stmts, nil +} diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse_test.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse_test.go new file mode 100644 index 00000000..4076e2c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/sqlparse/sqlparse_test.go @@ -0,0 +1,151 @@ +package sqlparse + +import ( + "strings" + "testing" + + . "gopkg.in/check.v1" +) + +func Test(t *testing.T) { TestingT(t) } + +type SqlParseSuite struct { +} + +var _ = Suite(&SqlParseSuite{}) + +func (s *SqlParseSuite) TestSemicolons(c *C) { + type testData struct { + line string + result bool + } + + tests := []testData{ + { + line: "END;", + result: true, + }, + { + line: "END; -- comment", + result: true, + }, + { + line: "END ; -- comment", + result: true, + }, + { + line: "END -- comment", + result: false, + }, + { + line: "END -- comment ;", + result: false, + }, + { + line: "END \" ; \" -- comment", + result: false, + }, + } + + for _, test := range tests { + r := endsWithSemicolon(test.line) + c.Assert(r, Equals, test.result) + } +} + +func (s *SqlParseSuite) TestSplitStatements(c *C) { + type testData struct { + sql string + direction bool + count int + } + + tests := []testData{ + { + sql: functxt, + direction: true, + count: 2, + }, + { + sql: functxt, + direction: false, + count: 2, + }, + { + sql: multitxt, + direction: true, + count: 2, + }, + { + sql: multitxt, + direction: false, + count: 2, + }, + } + + for _, test := range tests { + stmts, err := SplitSQLStatements(strings.NewReader(test.sql), test.direction) + c.Assert(err, IsNil) + c.Assert(stmts, HasLen, test.count) + } +} + +var functxt = `-- +migrate Up +CREATE TABLE IF NOT EXISTS histories ( + id BIGSERIAL PRIMARY KEY, + current_value varchar(2000) NOT NULL, + created_at timestamp with time zone NOT NULL +); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION histories_partition_creation( DATE, DATE ) +returns void AS $$ +DECLARE + create_query text; +BEGIN + FOR create_query IN SELECT + 'CREATE TABLE IF NOT EXISTS histories_' + || TO_CHAR( d, 'YYYY_MM' ) + || ' ( CHECK( created_at >= timestamp ''' + || TO_CHAR( d, 'YYYY-MM-DD 00:00:00' ) + || ''' AND created_at < timestamp ''' + || TO_CHAR( d + INTERVAL '1 month', 'YYYY-MM-DD 00:00:00' ) + || ''' ) ) inherits ( histories );' + FROM generate_series( $1, $2, '1 month' ) AS d + LOOP + EXECUTE create_query; + END LOOP; -- LOOP END +END; -- FUNCTION END +$$ +language plpgsql; +-- +migrate StatementEnd + +-- +migrate Down +drop function histories_partition_creation(DATE, DATE); +drop TABLE histories; +` + +// test multiple up/down transitions in a single script +var multitxt = `-- +migrate Up +CREATE TABLE post ( + id int NOT NULL, + title text, + body text, + PRIMARY KEY(id) +); + +-- +migrate Down +DROP TABLE post; + +-- +migrate Up +CREATE TABLE fancier_post ( + id int NOT NULL, + title text, + body text, + created_on timestamp without time zone, + PRIMARY KEY(id) +); + +-- +migrate Down +DROP TABLE fancier_post; +` diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/dbconfig.yml b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/dbconfig.yml new file mode 100644 index 00000000..6886e004 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/dbconfig.yml @@ -0,0 +1,20 @@ +postgres: + dialect: postgres + datasource: dbname=test sslmode=disable + dir: 
test-migrations + +mysql: + dialect: mysql + datasource: root@/test?parseTime=true + dir: test-migrations + +mysql_noflag: + dialect: mysql + datasource: root@/test + dir: test-migrations + +sqlite: + dialect: sqlite3 + datasource: test.db + dir: test-migrations + table: migrations diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql-flag.sh b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql-flag.sh new file mode 100644 index 00000000..3e0c940e --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql-flag.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Tweak PATH for Travis +export PATH=$PATH:$HOME/gopath/bin + +OPTIONS="-config=test-integration/dbconfig.yml -env mysql_noflag" + +set -ex + +sql-migrate status $OPTIONS | grep -q "Make sure that the parseTime option is supplied" diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql.sh b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql.sh new file mode 100644 index 00000000..e209c3b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/mysql.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Tweak PATH for Travis +export PATH=$PATH:$HOME/gopath/bin + +OPTIONS="-config=test-integration/dbconfig.yml -env mysql" + +set -ex + +sql-migrate status $OPTIONS +sql-migrate up $OPTIONS +sql-migrate down $OPTIONS +sql-migrate redo $OPTIONS +sql-migrate status $OPTIONS diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/postgres.sh b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/postgres.sh new file mode 100644 index 00000000..55a565ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/postgres.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Tweak PATH for Travis +export PATH=$PATH:$HOME/gopath/bin + +OPTIONS="-config=test-integration/dbconfig.yml -env postgres" + +set -ex + +sql-migrate status $OPTIONS +sql-migrate up $OPTIONS +sql-migrate down $OPTIONS +sql-migrate redo $OPTIONS +sql-migrate status $OPTIONS diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/sqlite.sh b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/sqlite.sh new file mode 100644 index 00000000..210099de --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-integration/sqlite.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Tweak PATH for Travis +export PATH=$PATH:$HOME/gopath/bin + +OPTIONS="-config=test-integration/dbconfig.yml -env sqlite" + +set -ex + +sql-migrate status $OPTIONS +sql-migrate up $OPTIONS +sql-migrate down $OPTIONS +sql-migrate redo $OPTIONS +sql-migrate status $OPTIONS + +# Should have used the custom migrations table +sqlite3 test.db "SELECT COUNT(*) FROM migrations" diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/1_initial.sql b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/1_initial.sql new file mode 100644 index 00000000..cd896fbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/1_initial.sql @@ -0,0 +1,8 @@ +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied +CREATE TABLE people (id int); + + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back +DROP TABLE people; diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/2_record.sql 
b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/2_record.sql new file mode 100644 index 00000000..c76d7691 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/test-migrations/2_record.sql @@ -0,0 +1,5 @@ +-- +migrate Up +INSERT INTO people (id) VALUES (1); + +-- +migrate Down +DELETE FROM people WHERE id=1; diff --git a/Godeps/_workspace/src/github.com/rubenv/sql-migrate/toapply_test.go b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/toapply_test.go new file mode 100644 index 00000000..6206f59b --- /dev/null +++ b/Godeps/_workspace/src/github.com/rubenv/sql-migrate/toapply_test.go @@ -0,0 +1,101 @@ +package migrate + +import ( + "sort" + . "gopkg.in/check.v1" +) + +var toapplyMigrations = []*Migration{ + &Migration{Id: "abc", Up: nil, Down: nil}, + &Migration{Id: "cde", Up: nil, Down: nil}, + &Migration{Id: "efg", Up: nil, Down: nil}, +} + +type ToApplyMigrateSuite struct { +} + +var _ = Suite(&ToApplyMigrateSuite{}) + +func (s *ToApplyMigrateSuite) TestGetAll(c *C) { + toApply := ToApply(toapplyMigrations, "", Up) + c.Assert(toApply, HasLen, 3) + c.Assert(toApply[0], Equals, toapplyMigrations[0]) + c.Assert(toApply[1], Equals, toapplyMigrations[1]) + c.Assert(toApply[2], Equals, toapplyMigrations[2]) +} + +func (s *ToApplyMigrateSuite) TestGetAbc(c *C) { + toApply := ToApply(toapplyMigrations, "abc", Up) + c.Assert(toApply, HasLen, 2) + c.Assert(toApply[0], Equals, toapplyMigrations[1]) + c.Assert(toApply[1], Equals, toapplyMigrations[2]) +} + +func (s *ToApplyMigrateSuite) TestGetCde(c *C) { + toApply := ToApply(toapplyMigrations, "cde", Up) + c.Assert(toApply, HasLen, 1) + c.Assert(toApply[0], Equals, toapplyMigrations[2]) +} + +func (s *ToApplyMigrateSuite) TestGetDone(c *C) { + toApply := ToApply(toapplyMigrations, "efg", Up) + c.Assert(toApply, HasLen, 0) + + toApply = ToApply(toapplyMigrations, "zzz", Up) + c.Assert(toApply, HasLen, 0) +} + +func (s *ToApplyMigrateSuite) TestDownDone(c *C) { + toApply := ToApply(toapplyMigrations, "", Down) + c.Assert(toApply, HasLen, 0) +} + +func (s *ToApplyMigrateSuite) TestDownCde(c *C) { + toApply := ToApply(toapplyMigrations, "cde", Down) + c.Assert(toApply, HasLen, 2) + c.Assert(toApply[0], Equals, toapplyMigrations[1]) + c.Assert(toApply[1], Equals, toapplyMigrations[0]) +} + +func (s *ToApplyMigrateSuite) TestDownAbc(c *C) { + toApply := ToApply(toapplyMigrations, "abc", Down) + c.Assert(toApply, HasLen, 1) + c.Assert(toApply[0], Equals, toapplyMigrations[0]) +} + +func (s *ToApplyMigrateSuite) TestDownAll(c *C) { + toApply := ToApply(toapplyMigrations, "efg", Down) + c.Assert(toApply, HasLen, 3) + c.Assert(toApply[0], Equals, toapplyMigrations[2]) + c.Assert(toApply[1], Equals, toapplyMigrations[1]) + c.Assert(toApply[2], Equals, toapplyMigrations[0]) + + toApply = ToApply(toapplyMigrations, "zzz", Down) + c.Assert(toApply, HasLen, 3) + c.Assert(toApply[0], Equals, toapplyMigrations[2]) + c.Assert(toApply[1], Equals, toapplyMigrations[1]) + c.Assert(toApply[2], Equals, toapplyMigrations[0]) +} + +func (s *ToApplyMigrateSuite) TestAlphaNumericMigrations(c *C) { + var migrations = byId([]*Migration{ + &Migration{Id: "10_abc", Up: nil, Down: nil}, + &Migration{Id: "1_abc", Up: nil, Down: nil}, + &Migration{Id: "efg", Up: nil, Down: nil}, + &Migration{Id: "2_cde", Up: nil, Down: nil}, + &Migration{Id: "35_cde", Up: nil, Down: nil}, + }) + + sort.Sort(migrations) + + toApplyUp := ToApply(migrations, "2_cde", Up) + c.Assert(toApplyUp, HasLen, 3) + c.Assert(toApplyUp[0].Id, Equals, "10_abc") + 
c.Assert(toApplyUp[1].Id, Equals, "35_cde") + c.Assert(toApplyUp[2].Id, Equals, "efg") + + toApplyDown := ToApply(migrations, "2_cde", Down) + c.Assert(toApplyDown, HasLen, 2) + c.Assert(toApplyDown[0].Id, Equals, "2_cde") + c.Assert(toApplyDown[1].Id, Equals, "1_abc") +} diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/gorp.v1/.gitignore new file mode 100644 index 00000000..8a06adea --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/.gitignore @@ -0,0 +1,8 @@ +_test +_testmain.go +_obj +*~ +*.6 +6.out +gorptest.bin +tmp diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/gorp.v1/.travis.yml new file mode 100644 index 00000000..3fa139d6 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/.travis.yml @@ -0,0 +1,21 @@ +language: go +go: + - 1.1 + - tip + +services: + - mysql + - postgres + - sqlite3 + +before_script: + - mysql -e "CREATE DATABASE gorptest;" + - mysql -u root -e "GRANT ALL ON gorptest.* TO gorptest@localhost IDENTIFIED BY 'gorptest'" + - psql -c "CREATE DATABASE gorptest;" -U postgres + - psql -c "CREATE USER "gorptest" WITH SUPERUSER PASSWORD 'gorptest';" -U postgres + - go get github.com/lib/pq + - go get github.com/mattn/go-sqlite3 + - go get github.com/ziutek/mymysql/godrv + - go get github.com/go-sql-driver/mysql + +script: ./test_all.sh diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/gorp.v1/LICENSE new file mode 100644 index 00000000..b661111d --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2012 James Cooper + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/Makefile b/Godeps/_workspace/src/gopkg.in/gorp.v1/Makefile new file mode 100644 index 00000000..edf771c1 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/Makefile @@ -0,0 +1,6 @@ +include $(GOROOT)/src/Make.inc + +TARG = github.com/coopernurse/gorp +GOFILES = gorp.go dialect.go + +include $(GOROOT)/src/Make.pkg \ No newline at end of file diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/README.md b/Godeps/_workspace/src/gopkg.in/gorp.v1/README.md new file mode 100644 index 00000000..c7cd5d8a --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/README.md @@ -0,0 +1,672 @@ +# Go Relational Persistence + +[![build status](https://secure.travis-ci.org/go-gorp/gorp.png)](http://travis-ci.org/go-gorp/gorp) + +I hesitate to call gorp an ORM. 
Go doesn't really have objects, at least
+not in the classic Smalltalk/Java sense. There goes the "O". gorp doesn't
+know anything about the relationships between your structs (at least not
+yet). So the "R" is questionable too (but I use it in the name because,
+well, it seemed more clever).
+
+The "M" is alive and well. Given some Go structs and a database, gorp
+should remove a fair amount of boilerplate busy-work from your code.
+
+I hope that gorp saves you time, minimizes the drudgery of getting data
+in and out of your database, and helps your code focus on algorithms,
+not infrastructure.
+
+* Bind struct fields to table columns via API or tag
+* Support for embedded structs
+* Support for transactions
+* Forward engineer db schema from structs (great for unit tests)
+* Pre/post insert/update/delete hooks
+* Automatically generate insert/update/delete statements for a struct
+* Automatic binding of auto increment PKs back to struct after insert
+* Delete by primary key(s)
+* Select by primary key(s)
+* Optional trace sql logging
+* Bind arbitrary SQL queries to a struct
+* Bind slice to SELECT query results without type assertions
+* Use positional or named bind parameters in custom SELECT queries
+* Optional optimistic locking using a version column (for update/deletes)
+
+## Installation
+
+    # install the library:
+    go get gopkg.in/gorp.v1
+
+    // use in your .go code:
+    import (
+        "gopkg.in/gorp.v1"
+    )
+
+## Versioning
+
+This project provides a stable release (v1.x tags) and a bleeding edge codebase (master).
+
+`gopkg.in/gorp.v1` points to the latest v1.x tag. The APIs for v1 are stable and shouldn't change. Development takes place on the master branch. Although the code in master should always compile and test successfully, it might break APIs. We aim to maintain backwards compatibility, but APIs and behaviour might be changed to fix a bug. Also note that APIs that are new in the master branch can change until released as v2.
+
+If you want to use the bleeding edge, use `github.com/go-gorp/gorp` as the import path.
+
+## API Documentation
+
+Full godoc output from the latest v1 release is available here:
+
+https://godoc.org/gopkg.in/gorp.v1
+
+For the latest code in master:
+
+https://godoc.org/github.com/go-gorp/gorp
+
+## Quickstart
+
+```go
+package main
+
+import (
+	"database/sql"
+	"gopkg.in/gorp.v1"
+	_ "github.com/mattn/go-sqlite3"
+	"log"
+	"time"
+)
+
+func main() {
+	// initialize the DbMap
+	dbmap := initDb()
+	defer dbmap.Db.Close()
+
+	// delete any existing rows
+	err := dbmap.TruncateTables()
+	checkErr(err, "TruncateTables failed")
+
+	// create two posts
+	p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum")
+	p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum")
+
+	// insert rows - auto increment PKs will be set properly after the insert
+	err = dbmap.Insert(&p1, &p2)
+	checkErr(err, "Insert failed")
+
+	// use convenience SelectInt
+	count, err := dbmap.SelectInt("select count(*) from posts")
+	checkErr(err, "select count(*) failed")
+	log.Println("Rows after inserting:", count)
+
+	// update a row
+	p2.Title = "Go 1.2 is better than ever"
+	count, err = dbmap.Update(&p2)
+	checkErr(err, "Update failed")
+	log.Println("Rows updated:", count)
+
+	// fetch one row - note use of "post_id" instead of "Id" since column is aliased
+	//
+	// Postgres users should use $1 instead of ?
placeholders + // See 'Known Issues' below + // + err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id) + checkErr(err, "SelectOne failed") + log.Println("p2 row:", p2) + + // fetch all rows + var posts []Post + _, err = dbmap.Select(&posts, "select * from posts order by post_id") + checkErr(err, "Select failed") + log.Println("All rows:") + for x, p := range posts { + log.Printf(" %d: %v\n", x, p) + } + + // delete row by PK + count, err = dbmap.Delete(&p1) + checkErr(err, "Delete failed") + log.Println("Rows deleted:", count) + + // delete row manually via Exec + _, err = dbmap.Exec("delete from posts where post_id=?", p2.Id) + checkErr(err, "Exec failed") + + // confirm count is zero + count, err = dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Row count - should be zero:", count) + + log.Println("Done!") +} + +type Post struct { + // db tag lets you specify the column name if it differs from the struct field + Id int64 `db:"post_id"` + Created int64 + Title string + Body string +} + +func newPost(title, body string) Post { + return Post{ + Created: time.Now().UnixNano(), + Title: title, + Body: body, + } +} + +func initDb() *gorp.DbMap { + // connect to db using standard Go database/sql API + // use whatever database/sql driver you wish + db, err := sql.Open("sqlite3", "/tmp/post_db.bin") + checkErr(err, "sql.Open failed") + + // construct a gorp DbMap + dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} + + // add a table, setting the table name to 'posts' and + // specifying that the Id property is an auto incrementing PK + dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id") + + // create the table. in a production system you'd generally + // use a migration tool, or create the tables via scripts + err = dbmap.CreateTablesIfNotExists() + checkErr(err, "Create tables failed") + + return dbmap +} + +func checkErr(err error, msg string) { + if err != nil { + log.Fatalln(msg, err) + } +} +``` + +## Examples + +### Mapping structs to tables + +First define some types: + +```go +type Invoice struct { + Id int64 + Created int64 + Updated int64 + Memo string + PersonId int64 +} + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string +} + +// Example of using tags to alias fields to column names +// The 'db' value is the column name +// +// A hyphen will cause gorp to skip this field, similar to the +// Go json package. 
+//
+// This is equivalent to using the ColMap methods:
+//
+//  table := dbmap.AddTableWithName(Product{}, "product")
+//  table.ColMap("Id").Rename("product_id")
+//  table.ColMap("Price").Rename("unit_price")
+//  table.ColMap("IgnoreMe").SetTransient(true)
+//
+type Product struct {
+	Id       int64  `db:"product_id"`
+	Price    int64  `db:"unit_price"`
+	IgnoreMe string `db:"-"`
+}
+```
+
+Then create a mapper; typically you'd do this once at app startup:
+
+```go
+// connect to db using standard Go database/sql API
+// use whatever database/sql driver you wish
+db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword")
+
+// construct a gorp DbMap
+dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}}
+
+// register the structs you wish to use with gorp
+// you can also use the shorter dbmap.AddTable() if you
+// don't want to override the table name
+//
+// SetKeys(true) means we have an auto-increment primary key, which
+// will get automatically bound to your struct post-insert
+//
+t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id")
+t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id")
+t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id")
+```
+
+### Struct Embedding
+
+gorp supports embedding structs. For example:
+
+```go
+type Names struct {
+	FirstName string
+	LastName  string
+}
+
+type WithEmbeddedStruct struct {
+	Id int64
+	Names
+}
+
+es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}}
+err := dbmap.Insert(es)
+```
+
+See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example.
+
+### Create/Drop Tables ###
+
+Automatically create / drop registered tables. This is useful for unit tests
+but is entirely optional. You can of course use gorp with tables created manually,
+or with a separate migration tool (like goose: https://bitbucket.org/liamstask/goose).
+
+```go
+// create all registered tables
+dbmap.CreateTables()
+
+// same as above, but uses "if not exists" clause to skip tables that are
+// already defined
+dbmap.CreateTablesIfNotExists()
+
+// drop
+dbmap.DropTables()
+```
+
+### SQL Logging
+
+Optionally you can pass in a logger to trace all SQL statements.
+I recommend enabling this initially while you're getting the feel for what
+gorp is doing on your behalf.
+
+Gorp defines a `GorpLogger` interface that Go's built-in `log.Logger` satisfies.
+However, you can write your own `GorpLogger` implementation, or use a package such
+as `glog` if you want more control over how statements are logged.
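+A minimal custom implementation might look like this sketch (it assumes
+`TraceOn` only needs a `log.Logger`-style `Printf` method, and that `fmt`
+is imported):
+
+```go
+// stdoutTracer satisfies GorpLogger with a bare Printf method
+// and simply echoes each traced statement to stdout.
+type stdoutTracer struct{}
+
+func (t stdoutTracer) Printf(format string, v ...interface{}) {
+	fmt.Printf(format+"\n", v...)
+}
+```
+
+Hooking up a logger (here the standard `log.Logger`) looks like this: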
+ +```go +// Will log all SQL statements + args as they are run +// The first arg is a string prefix to prepend to all log messages +dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds)) + +// Turn off tracing +dbmap.TraceOff() +``` + +### Insert + +```go +// Must declare as pointers so optional callback hooks +// can operate on your data, not copies +inv1 := &Invoice{0, 100, 200, "first order", 0} +inv2 := &Invoice{0, 100, 200, "second order", 0} + +// Insert your rows +err := dbmap.Insert(inv1, inv2) + +// Because we called SetKeys(true) on Invoice, the Id field +// will be populated after the Insert() automatically +fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id) +``` + +### Update + +Continuing the above example, use the `Update` method to modify an Invoice: + +```go +// count is the # of rows updated, which should be 1 in this example +count, err := dbmap.Update(inv1) +``` + +### Delete + +If you have primary key(s) defined for a struct, you can use the `Delete` +method to remove rows: + +```go +count, err := dbmap.Delete(inv1) +``` + +### Select by Key + +Use the `Get` method to fetch a single row by primary key. It returns +nil if no row is found. + +```go +// fetch Invoice with Id=99 +obj, err := dbmap.Get(Invoice{}, 99) +inv := obj.(*Invoice) +``` + +### Ad Hoc SQL + +#### SELECT + +`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice +or a single struct. + +```go +// Select a slice - first return value is not needed when a slice pointer is passed to Select() +var posts []Post +_, err := dbmap.Select(&posts, "select * from post order by id") + +// You can also use primitive types +var ids []string +_, err := dbmap.Select(&ids, "select id from post") + +// Select a single row. +// Returns an error if no row found, or if more than one row is found +var post Post +err := dbmap.SelectOne(&post, "select * from post where id=?", id) +``` + +Want to do joins? Just write the SQL and the struct. gorp will bind them: + +```go +// Define a type for your join +// It *must* contain all the columns in your SELECT statement +// +// The names here should match the aliased column names you specify +// in your SQL - no additional binding work required. simple. +// +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string +} + +// Create some rows +p1 := &Person{0, 0, 0, "bob", "smith"} +dbmap.Insert(p1) + +// notice how we can wire up p1.Id to the invoice easily +inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id} +dbmap.Insert(inv1) + +// Run your query +query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + +// pass a slice to Select() +var list []InvoicePersonView +_, err := dbmap.Select(&list, query) + +// this should test true +expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName} +if reflect.DeepEqual(list[0], expected) { + fmt.Println("Woot! My join worked!") +} +``` + +#### SELECT string or int64 + +gorp provides a few convenience methods for selecting a single string or int64. + +```go +// select single int64 from db (use $1 instead of ? for postgresql) +i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal) + +// select single string from db: +s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal) + +``` + +#### Named bind parameters + +You may use a map or struct to bind parameters by name. This is currently +only supported in SELECT queries. 
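+For the struct form, a sketch (gorp is assumed here to match each
+placeholder to the exported field of the same name; `fooParams` is a
+hypothetical type):
+
+```go
+// fooParams carries the named bind parameters for the query below.
+type fooParams struct {
+	Name string
+	Age  int
+}
+
+_, err := dbm.Select(&dest, "select * from Foo where name = :Name and age = :Age",
+	fooParams{Name: "Rob", Age: 31})
+```
+
+And with a map: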
+
+```go
+_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{
+	"name": "Rob",
+	"age":  31,
+})
+```
+
+#### UPDATE / DELETE
+
+You can execute raw SQL if you wish. Particularly good for batch operations.
+
+```go
+res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10)
+```
+
+### Transactions
+
+You can batch operations into a transaction:
+
+```go
+func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error {
+	// Start a new transaction
+	trans, err := dbmap.Begin()
+	if err != nil {
+		return err
+	}
+
+	trans.Insert(per)
+	inv.PersonId = per.Id
+	trans.Insert(inv)
+
+	// if the commit is successful, a nil error is returned
+	return trans.Commit()
+}
+```
+
+### Hooks
+
+Use hooks to update data before/after saving to the db. Good for timestamps:
+
+```go
+// implement the PreInsert and PreUpdate hooks
+func (i *Invoice) PreInsert(s gorp.SqlExecutor) error {
+	i.Created = time.Now().UnixNano()
+	i.Updated = i.Created
+	return nil
+}
+
+func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error {
+	i.Updated = time.Now().UnixNano()
+	return nil
+}
+
+// You can use the SqlExecutor to cascade additional SQL
+// Take care to avoid cycles. gorp won't prevent them.
+//
+// Here's an example of a cascading delete
+//
+func (p *Person) PreDelete(s gorp.SqlExecutor) error {
+	query := "delete from invoice_test where PersonId=?"
+	_, err := s.Exec(query, p.Id)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+```
+
+Full list of hooks that you can implement:
+
+    PostGet
+    PreInsert
+    PostInsert
+    PreUpdate
+    PostUpdate
+    PreDelete
+    PostDelete
+
+    All have the same signature. For example:
+
+    func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error
+
+### Optimistic Locking
+
+gorp provides a simple optimistic locking feature, similar to Java's JPA, that
+will raise an error if you try to update/delete a row whose `version` column
+has a value different than the one in memory. This provides a safe way to do
+"select then update" style operations without explicit read and write locks.
+
+```go
+// Version is an auto-incremented number, managed by gorp
+// If this property is present on your struct, update
+// operations will be constrained
+//
+// For example, say we defined Person as:
+
+type Person struct {
+	Id      int64
+	Created int64
+	Updated int64
+	FName   string
+	LName   string
+
+	// automatically used as the Version col
+	// use table.SetVersionCol("columnName") to map a different
+	// struct field as the version field
+	Version int64
+}
+
+p1 := &Person{0, 0, 0, "Bob", "Smith", 0}
+dbmap.Insert(p1) // Version is now 1
+
+obj, err := dbmap.Get(Person{}, p1.Id)
+p2 := obj.(*Person)
+p2.LName = "Edwards"
+dbmap.Update(p2) // Version is now 2
+
+p1.LName = "Howard"
+
+// Raises error because p1.Version == 1, which is out of date
+count, err := dbmap.Update(p1)
+_, ok := err.(gorp.OptimisticLockError)
+if ok {
+	// should reach this statement
+
+	// in a real app you might reload the row and retry, or
+	// you might propagate this to the user, depending on the desired
+	// semantics
+	fmt.Printf("Tried to update row with stale data: %v\n", err)
+} else {
+	// some other db error occurred - log or return up the stack
+	fmt.Printf("Unknown db err: %v\n", err)
+}
+```
+
+## Database Drivers
+
+gorp uses the Go 1 `database/sql` package. A full list of compliant drivers is available here:
+
+http://code.google.com/p/go-wiki/wiki/SQLDrivers
+
+Sadly, SQL databases differ on various issues.
gorp provides a Dialect interface that should be
+implemented per database vendor. Dialects are provided for:
+
+* MySQL
+* PostgreSQL
+* sqlite3
+
+Each of these three databases passes the test suite. See `gorp_test.go` for example
+DSNs for these three databases.
+
+Support is also provided for:
+
+* Oracle (contributed by @klaidliadon)
+* SQL Server (contributed by @qrawl) - use driver: github.com/denisenkom/go-mssqldb
+
+Note that these databases are not covered by CI and I (@coopernurse) have no good way to
+test them locally. So please try them and send patches as needed, but expect a bit more
+unpredictability.
+
+## Known Issues
+
+### SQL placeholder portability
+
+Different databases use different strings to indicate variable placeholders in
+prepared SQL statements. Unlike some database abstraction layers (such as JDBC),
+Go's `database/sql` does not standardize this.
+
+SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` methods delegates
+to a Dialect implementation for each database, and will generate portable SQL.
+
+Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, etc. will not be
+parsed. Consequently you may have portability issues if you write a query like this:
+
+```go
+// works on MySQL and Sqlite3, but not with Postgresql
+err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30)
+```
+
+In `Select` and `SelectOne` you can use named parameters to work around this.
+The following is portable:
+
+```go
+err := dbmap.SelectOne(&val, "select * from foo where id = :id",
+	map[string]interface{}{"id": 30})
+```
+
+### time.Time and time zones
+
+gorp will pass `time.Time` fields through to the `database/sql` driver, but note that
+the behavior of this type varies across database drivers.
+
+MySQL users should be especially cautious. See: https://github.com/ziutek/mymysql/pull/77
+
+To avoid any potential issues with timezone/DST, consider using an integer field for time
+data and storing UNIX time.
+
+## Running the tests
+
+The included tests may be run against MySQL, PostgreSQL, or sqlite3.
+You must set two environment variables so the test code knows which driver to
+use, and how to connect to your database.
+
+```sh
+# MySQL example:
+export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123
+export GORP_TEST_DIALECT=mysql
+
+# run the tests
+go test
+
+# run the tests and benchmarks
+go test -bench="Bench" -benchtime 10
+```
+
+Valid `GORP_TEST_DIALECT` values are: "mysql", "postgres", "sqlite3"
+See the `test_all.sh` script for examples of all 3 databases. This is the script I run
+locally to test the library.
+
+## Performance
+
+gorp uses reflection to construct SQL queries and bind parameters. See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a simple perf test. On my MacBook Pro gorp is about 2-3% slower than hand-written SQL.
+
+## Help/Support
+
+IRC: #gorp
+Mailing list: gorp-dev@googlegroups.com
+Bugs/Enhancements: Create a github issue
+
+## Pull requests / Contributions
+
+Contributions are very welcome. Please follow these guidelines:
+
+* Fork the `master` branch and issue pull requests targeting the `master` branch
+* If you are adding an enhancement, please open an issue first with your proposed change.
+* Changes that break backwards compatibility in the public API are only accepted after we
+  discuss on a GitHub issue for a while.
+
+Thanks!
+ +## Contributors + +* matthias-margush - column aliasing via tags +* Rob Figueiredo - @robfig +* Quinn Slack - @sqs diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/dialect.go b/Godeps/_workspace/src/gopkg.in/gorp.v1/dialect.go new file mode 100644 index 00000000..5e8fdc66 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/dialect.go @@ -0,0 +1,692 @@ +package gorp + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// The Dialect interface encapsulates behaviors that differ across +// SQL databases. At present the Dialect is only used by CreateTables() +// but this could change in the future +type Dialect interface { + + // adds a suffix to any query, usually ";" + QuerySuffix() string + + // ToSqlType returns the SQL column type to use when creating a + // table of the given Go Type. maxsize can be used to switch based on + // size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB, + // or LONGBLOB depending on the maxsize + ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string + + // string to append to primary key column definitions + AutoIncrStr() string + + // string to bind autoincrement columns to. Empty string will + // remove reference to those columns in the INSERT statement. + AutoIncrBindValue() string + + AutoIncrInsertSuffix(col *ColumnMap) string + + // string to append to "create table" statement for vendor specific + // table attributes + CreateTableSuffix() string + + // string to truncate tables + TruncateClause() string + + // bind variable string to use when forming SQL statements + // in many dbs it is "?", but Postgres appears to use $1 + // + // i is a zero based index of the bind variable in this statement + // + BindVar(i int) string + + // Handles quoting of a field name to ensure that it doesn't raise any + // SQL parsing exceptions by using a reserved word as a field name. + QuoteField(field string) string + + // Handles building up of a schema.database string that is compatible with + // the given dialect + // + // schema - The schema that lives in + // table - The table name + QuotedTableForQuery(schema string, table string) string + + // Existance clause for table creation / deletion + IfSchemaNotExists(command, schema string) string + IfTableExists(command, schema, table string) string + IfTableNotExists(command, schema, table string) string +} + +// IntegerAutoIncrInserter is implemented by dialects that can perform +// inserts with automatically incremented integer primary keys. If +// the dialect can handle automatic assignment of more than just +// integers, see TargetedAutoIncrInserter. +type IntegerAutoIncrInserter interface { + InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) +} + +// TargetedAutoIncrInserter is implemented by dialects that can +// perform automatic assignment of any primary key type (i.e. strings +// for uuids, integers for serials, etc). +type TargetedAutoIncrInserter interface { + // InsertAutoIncrToTarget runs an insert operation and assigns the + // automatically generated primary key directly to the passed in + // target. The target should be a pointer to the primary key + // field of the value being inserted. + InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error +} + +func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + res, err := exec.Exec(insertSql, params...) 
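+	// (Note: res.LastInsertId below is not supported by every driver;
+	// PostgreSQL's lib/pq, for example, cannot report it, which is why
+	// PostgresDialect implements TargetedAutoIncrInserter with a
+	// RETURNING clause instead of using this helper.)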
+ if err != nil { + return 0, err + } + return res.LastInsertId() +} + +/////////////////////////////////////////////////////// +// sqlite3 // +///////////// + +type SqliteDialect struct { + suffix string +} + +func (d SqliteDialect) QuerySuffix() string { return ";" } + +func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "integer" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return "integer" + case reflect.Float64, reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "blob" + } + } + + switch val.Name() { + case "NullInt64": + return "integer" + case "NullFloat64": + return "real" + case "NullBool": + return "integer" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns autoincrement +func (d SqliteDialect) AutoIncrStr() string { + return "autoincrement" +} + +func (d SqliteDialect) AutoIncrBindValue() string { + return "null" +} + +func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqliteDialect) CreateTableSuffix() string { + return d.suffix +} + +// With sqlite, there technically isn't a TRUNCATE statement, +// but a DELETE FROM uses a truncate optimization: +// http://www.sqlite.org/lang_delete.html +func (d SqliteDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqliteDialect) BindVar(i int) string { + return "?" +} + +func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) 
+} + +func (d SqliteDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +// sqlite does not have schemas like PostgreSQL does, so just escape it like normal +func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { + return d.QuoteField(table) +} + +func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SqliteDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// PostgreSQL // +//////////////// + +type PostgresDialect struct { + suffix string +} + +func (d PostgresDialect) QuerySuffix() string { return ";" } + +func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "Time": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d PostgresDialect) AutoIncrStr() string { + return "" +} + +func (d PostgresDialect) AutoIncrBindValue() string { + return "default" +} + +func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + col.ColumnName +} + +// Returns suffix +func (d PostgresDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d PostgresDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d PostgresDialect) BindVar(i int) string { + return fmt.Sprintf("$%d", i+1) +} + +func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { + rows, err := exec.query(insertSql, params...) + if err != nil { + return err + } + defer rows.Close() + + if rows.Next() { + err := rows.Scan(target) + return err + } + + return errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error()) +} + +func (d PostgresDialect) QuoteField(f string) string { + return `"` + strings.ToLower(f) + `"` +} + +func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d PostgresDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d PostgresDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d PostgresDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// MySQL // +/////////// + +// Implementation of Dialect for MySQL databases. +type MySQLDialect struct { + + // Engine is the storage engine to use "InnoDB" vs "MyISAM" for example + Engine string + + // Encoding is the character encoding to use for created tables + Encoding string +} + +func (d MySQLDialect) QuerySuffix() string { return ";" } + +func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "tinyint unsigned" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "smallint unsigned" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "int unsigned" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint unsigned" + case reflect.Float64, reflect.Float32: + return "double" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "mediumblob" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns auto_increment +func (d MySQLDialect) AutoIncrStr() string { + return "auto_increment" +} + +func (d MySQLDialect) AutoIncrBindValue() string { + return "null" +} + +func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns engine=%s charset=%s based on values stored on struct +func (d MySQLDialect) CreateTableSuffix() string { + if d.Engine == "" || d.Encoding == "" { + msg := "gorp - undefined" + + if d.Engine == "" { + msg += " MySQLDialect.Engine" + } + if d.Engine == "" && d.Encoding == "" { + msg += "," + } + if d.Encoding == "" { + msg += " MySQLDialect.Encoding" + } + msg += ". Check that your MySQLDialect was correctly initialized when declared." + panic(msg) + } + + return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding) +} + +func (d MySQLDialect) TruncateClause() string { + return "truncate" +} + +// Returns "?" +func (d MySQLDialect) BindVar(i int) string { + return "?" +} + +func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d MySQLDialect) QuoteField(f string) string { + return "`" + f + "`" +} + +func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table)
+}
+
+func (d MySQLDialect) IfSchemaNotExists(command, schema string) string {
+    return fmt.Sprintf("%s if not exists", command)
+}
+
+func (d MySQLDialect) IfTableExists(command, schema, table string) string {
+    return fmt.Sprintf("%s if exists", command)
+}
+
+func (d MySQLDialect) IfTableNotExists(command, schema, table string) string {
+    return fmt.Sprintf("%s if not exists", command)
+}
+
+///////////////////////////////////////////////////////
+// Sql Server //
+////////////////
+
+// Implementation of Dialect for Microsoft SQL Server databases.
+// Tested on SQL Server 2008 with driver: github.com/denisenkom/go-mssqldb
+type SqlServerDialect struct {
+    suffix string
+}
+
+func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
+    switch val.Kind() {
+    case reflect.Ptr:
+        return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
+    case reflect.Bool:
+        return "bit"
+    case reflect.Int8:
+        return "tinyint"
+    case reflect.Uint8:
+        return "smallint"
+    case reflect.Int16:
+        return "smallint"
+    case reflect.Uint16:
+        return "int"
+    case reflect.Int, reflect.Int32:
+        return "int"
+    case reflect.Uint, reflect.Uint32:
+        return "bigint"
+    case reflect.Int64:
+        return "bigint"
+    case reflect.Uint64:
+        return "bigint"
+    case reflect.Float32:
+        return "real"
+    case reflect.Float64:
+        return "float(53)"
+    case reflect.Slice:
+        if val.Elem().Kind() == reflect.Uint8 {
+            return "varbinary"
+        }
+    }
+
+    switch val.Name() {
+    case "NullInt64":
+        return "bigint"
+    case "NullFloat64":
+        return "float(53)"
+    case "NullBool":
+        return "tinyint"
+    case "Time":
+        return "datetime"
+    }
+
+    if maxsize < 1 {
+        maxsize = 255
+    }
+    return fmt.Sprintf("varchar(%d)", maxsize)
+}
+
+// Returns "identity(0,1)"
+func (d SqlServerDialect) AutoIncrStr() string {
+    return "identity(0,1)"
+}
+
+// Empty string removes autoincrement columns from the INSERT statements.
+func (d SqlServerDialect) AutoIncrBindValue() string {
+    return ""
+}
+
+func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
+    return ""
+}
+
+// Returns the create table suffix configured on the dialect
+func (d SqlServerDialect) CreateTableSuffix() string {
+    return d.suffix
+}
+
+func (d SqlServerDialect) TruncateClause() string {
+    return "delete from"
+}
+
+// Returns "?"
+func (d SqlServerDialect) BindVar(i int) string {
+    return "?"
+}
+
+func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
+    return standardInsertAutoIncr(exec, insertSql, params...)
+}
+
+func (d SqlServerDialect) QuoteField(f string) string {
+    return `"` + f + `"`
+}
+
+func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string {
+    if strings.TrimSpace(schema) == "" {
+        return table
+    }
+    return schema + "." + table
+}
+
+func (d SqlServerDialect) QuerySuffix() string { return ";" }
+
+func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string {
+    s := fmt.Sprintf("if not exists (select name from sys.schemas where name = '%s') %s", schema, command)
+    return s
+}
+
+func (d SqlServerDialect) IfTableExists(command, schema, table string) string {
+    var schemaClause string
+    if strings.TrimSpace(schema) != "" {
+        schemaClause = fmt.Sprintf("table_schema = '%s' and ", schema)
+    }
+    s := fmt.Sprintf("if exists (select * from information_schema.tables where %stable_name = '%s') %s", schemaClause, table, command)
+    return s
+}
+
+func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string {
+    var schemaClause string
+    if strings.TrimSpace(schema) != "" {
+        schemaClause = fmt.Sprintf("table_schema = '%s' and ", schema)
+    }
+    s := fmt.Sprintf("if not exists (select * from information_schema.tables where %stable_name = '%s') %s", schemaClause, table, command)
+    return s
+}
+
+///////////////////////////////////////////////////////
+// Oracle //
+///////////
+
+// Implementation of Dialect for Oracle databases.
+type OracleDialect struct{}
+
+func (d OracleDialect) QuerySuffix() string { return "" }
+
+func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
+    switch val.Kind() {
+    case reflect.Ptr:
+        return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
+    case reflect.Bool:
+        return "boolean"
+    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+        if isAutoIncr {
+            return "serial"
+        }
+        return "integer"
+    case reflect.Int64, reflect.Uint64:
+        if isAutoIncr {
+            return "bigserial"
+        }
+        return "bigint"
+    case reflect.Float64:
+        return "double precision"
+    case reflect.Float32:
+        return "real"
+    case reflect.Slice:
+        if val.Elem().Kind() == reflect.Uint8 {
+            return "bytea"
+        }
+    }
+
+    switch val.Name() {
+    case "NullInt64":
+        return "bigint"
+    case "NullFloat64":
+        return "double precision"
+    case "NullBool":
+        return "boolean"
+    case "NullTime", "Time":
+        return "timestamp with time zone"
+    }
+
+    if maxsize > 0 {
+        return fmt.Sprintf("varchar(%d)", maxsize)
+    }
+    return "text"
+}
+
+// Returns empty string
+func (d OracleDialect) AutoIncrStr() string {
+    return ""
+}
+
+func (d OracleDialect) AutoIncrBindValue() string {
+    return "default"
+}
+
+func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
+    return " returning " + col.ColumnName
+}
+
+// Returns empty string
+func (d OracleDialect) CreateTableSuffix() string {
+    return ""
+}
+
+func (d OracleDialect) TruncateClause() string {
+    return "truncate"
+}
+
+// Returns ":1", ":2", etc., i.e. ":(i+1)"
+func (d OracleDialect) BindVar(i int) string {
+    return fmt.Sprintf(":%d", i+1)
+}
+
+func (d OracleDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
+    rows, err := exec.query(insertSql, params...)
+    if err != nil {
+        return 0, err
+    }
+    defer rows.Close()
+
+    if rows.Next() {
+        var id int64
+        err := rows.Scan(&id)
+        return id, err
+    }
+
+    // Guard against a nil rows.Err(): calling Error() on it would panic.
+    if err := rows.Err(); err != nil {
+        return 0, errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + err.Error())
+    }
+    return 0, errors.New("No serial value returned for insert: " + insertSql)
+}
+
+func (d OracleDialect) QuoteField(f string) string {
+    return `"` + strings.ToUpper(f) + `"`
+}
+
+func (d OracleDialect) QuotedTableForQuery(schema string, table string) string {
+    if strings.TrimSpace(schema) == "" {
+        return d.QuoteField(table)
+    }
+
+    return schema + "."
+ d.QuoteField(table) +} + +func (d OracleDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d OracleDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d OracleDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/errors.go b/Godeps/_workspace/src/gopkg.in/gorp.v1/errors.go new file mode 100644 index 00000000..356d6847 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/errors.go @@ -0,0 +1,26 @@ +package gorp + +import ( + "fmt" +) + +// A non-fatal error, when a select query returns columns that do not exist +// as fields in the struct it is being mapped to +type NoFieldInTypeError struct { + TypeName string + MissingColNames []string +} + +func (err *NoFieldInTypeError) Error() string { + return fmt.Sprintf("gorp: No fields %+v in type %s", err.MissingColNames, err.TypeName) +} + +// returns true if the error is non-fatal (ie, we shouldn't immediately return) +func NonFatalError(err error) bool { + switch err.(type) { + case *NoFieldInTypeError: + return true + default: + return false + } +} diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/gorp.go b/Godeps/_workspace/src/gopkg.in/gorp.v1/gorp.go new file mode 100644 index 00000000..1ad61870 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/gorp.go @@ -0,0 +1,2085 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/coopernurse/gorp +// +package gorp + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "reflect" + "regexp" + "strings" + "time" + "log" + "os" +) + +// Oracle String (empty string is null) +type OracleString struct { + sql.NullString +} + +// Scan implements the Scanner interface. +func (os *OracleString) Scan(value interface{}) error { + if value == nil { + os.String, os.Valid = "", false + return nil + } + os.Valid = true + return os.NullString.Scan(value) +} + +// Value implements the driver Valuer interface. +func (os OracleString) Value() (driver.Value, error) { + if !os.Valid || os.String == "" { + return nil, nil + } + return os.String, nil +} + +// A nullable Time value +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
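+// Editor's illustrative sketch (not part of the upstream source; the Post
+// struct is hypothetical): NullTime is useful for nullable timestamp
+// columns, scanning SQL NULL as Valid == false:
+//
+//    type Post struct {
+//        Id      int64
+//        Deleted gorp.NullTime // Deleted.Valid is false when the column is NULL
+//    }
+//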
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +var zeroVal reflect.Value +var versFieldConst = "[gorp_ver_field]" + +// OptimisticLockError is returned by Update() or Delete() if the +// struct being modified has a Version field and the value is not equal to +// the current value in the database +type OptimisticLockError struct { + // Table name where the lock error occurred + TableName string + + // Primary key values of the row being updated/deleted + Keys []interface{} + + // true if a row was found with those keys, indicating the + // LocalVersion is stale. false if no value was found with those + // keys, suggesting the row has been deleted since loaded, or + // was never inserted to begin with + RowExists bool + + // Version value on the struct passed to Update/Delete. This value is + // out of sync with the database. + LocalVersion int64 +} + +// Error returns a description of the cause of the lock error +func (e OptimisticLockError) Error() string { + if e.RowExists { + return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion) + } + + return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys) +} + +// The TypeConverter interface provides a way to map a value of one +// type to another type when persisting to, or loading from, a database. +// +// Example use cases: Implement type converter to convert bool types to "y"/"n" strings, +// or serialize a struct member as a JSON blob. +type TypeConverter interface { + // ToDb converts val to another type. Called before INSERT/UPDATE operations + ToDb(val interface{}) (interface{}, error) + + // FromDb returns a CustomScanner appropriate for this type. This will be used + // to hold values returned from SELECT queries. + // + // In particular the CustomScanner returned should implement a Binder + // function appropriate for the Go type you wish to convert the db value to + // + // If bool==false, then no custom scanner will be used for this field. + FromDb(target interface{}) (CustomScanner, bool) +} + +// CustomScanner binds a database column value to a Go type +type CustomScanner struct { + // After a row is scanned, Holder will contain the value from the database column. + // Initialize the CustomScanner with the concrete Go type you wish the database + // driver to scan the raw column into. + Holder interface{} + // Target typically holds a pointer to the target struct field to bind the Holder + // value to. + Target interface{} + // Binder is a custom function that converts the holder value to the target type + // and sets target accordingly. This function should return error if a problem + // occurs converting the holder to the target. + Binder func(holder interface{}, target interface{}) error +} + +// Bind is called automatically by gorp after Scan() +func (me CustomScanner) Bind() error { + return me.Binder(me.Holder, me.Target) +} + +// DbMap is the root gorp mapping object. Create one of these for each +// database schema you wish to map. Each DbMap contains a list of +// mapped tables. 
+// +// Example: +// +// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} +// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} +// +type DbMap struct { + // Db handle to use with this map + Db *sql.DB + + // Dialect implementation to use with this map + Dialect Dialect + + TypeConverter TypeConverter + + tables []*TableMap + logger GorpLogger + logPrefix string +} + +// TableMap represents a mapping between a Go struct and a database table +// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these +type TableMap struct { + // Name of database table. + TableName string + SchemaName string + gotype reflect.Type + Columns []*ColumnMap + keys []*ColumnMap + uniqueTogether [][]string + version *ColumnMap + insertPlan bindPlan + updatePlan bindPlan + deletePlan bindPlan + getPlan bindPlan + dbmap *DbMap +} + +// ResetSql removes cached insert/update/select/delete SQL strings +// associated with this TableMap. Call this if you've modified +// any column names or the table name itself. +func (t *TableMap) ResetSql() { + t.insertPlan = bindPlan{} + t.updatePlan = bindPlan{} + t.deletePlan = bindPlan{} + t.getPlan = bindPlan{} +} + +// SetKeys lets you specify the fields on a struct that map to primary +// key columns on the table. If isAutoIncr is set, result.LastInsertId() +// will be used after INSERT to bind the generated id to the Go struct. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if isAutoIncr is true, and fieldNames length != 1 +// +func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap { + if isAutoIncr && len(fieldNames) != 1 { + panic(fmt.Sprintf( + "gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)", + len(fieldNames))) + } + t.keys = make([]*ColumnMap, 0) + for _, name := range fieldNames { + colmap := t.ColMap(name) + colmap.isPK = true + colmap.isAutoIncr = isAutoIncr + t.keys = append(t.keys, colmap) + } + t.ResetSql() + + return t +} + +// SetUniqueTogether lets you specify uniqueness constraints across multiple +// columns on the table. Each call adds an additional constraint for the +// specified columns. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if fieldNames length < 2. +// +func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap { + if len(fieldNames) < 2 { + panic(fmt.Sprintf( + "gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")) + } + + columns := make([]string, 0) + for _, name := range fieldNames { + columns = append(columns, name) + } + t.uniqueTogether = append(t.uniqueTogether, columns) + t.ResetSql() + + return t +} + +// ColMap returns the ColumnMap pointer matching the given struct field +// name. It panics if the struct does not contain a field matching this +// name. +func (t *TableMap) ColMap(field string) *ColumnMap { + col := colMapOrNil(t, field) + if col == nil { + e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s", + t.TableName, t.gotype.Name(), field) + + panic(e) + } + return col +} + +func colMapOrNil(t *TableMap, field string) *ColumnMap { + for _, col := range t.Columns { + if col.fieldName == field || col.ColumnName == field { + return col + } + } + return nil +} + +// SetVersionCol sets the column to use as the Version field. By default +// the "Version" field is used. Returns the column found, or panics +// if the struct does not contain a field matching this name. 
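+//
+// Illustrative sketch (editor's addition; the Invoice type and its
+// "Revision" field are hypothetical): a custom version column for
+// optimistic locking can be wired up as:
+//
+//    t := dbmap.AddTableWithName(Invoice{}, "invoice").SetKeys(true, "Id")
+//    t.SetVersionCol("Revision")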
+// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +func (t *TableMap) SetVersionCol(field string) *ColumnMap { + c := t.ColMap(field) + t.version = c + t.ResetSql() + return c +} + +type bindPlan struct { + query string + argFields []string + keyFields []string + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (plan bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { + bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} + if plan.versField != "" { + bi.existingVersion = elem.FieldByName(plan.versField).Int() + } + + var err error + + for i := 0; i < len(plan.argFields); i++ { + k := plan.argFields[i] + if k == versFieldConst { + newVer := bi.existingVersion + 1 + bi.args = append(bi.args, newVer) + if bi.existingVersion == 0 { + elem.FieldByName(plan.versField).SetInt(int64(newVer)) + } + } else { + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.args = append(bi.args, val) + } + } + + for i := 0; i < len(plan.keyFields); i++ { + k := plan.keyFields[i] + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.keys = append(bi.keys, val) + } + + return bi, nil +} + +type bindInstance struct { + query string + args []interface{} + keys []interface{} + existingVersion int64 + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { + plan := t.insertPlan + if plan.query == "" { + plan.autoIncrIdx = -1 + + s := bytes.Buffer{} + s2 := bytes.Buffer{} + s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + first := true + for y := range t.Columns { + col := t.Columns[y] + if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == "") { + if !col.Transient { + if !first { + s.WriteString(",") + s2.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + + if col.isAutoIncr { + s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } else { + s2.WriteString(t.dbmap.Dialect.BindVar(x)) + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + + x++ + } + first = false + } + } else { + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } + } + s.WriteString(") values (") + s.WriteString(s2.String()) + s.WriteString(")") + if plan.autoIncrIdx > -1 { + s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.insertPlan = plan + } + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindUpdate(elem reflect.Value) (bindInstance, error) { + plan := t.updatePlan + if plan.query == "" { + + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + x := 0 + + for y := range t.Columns { + col := t.Columns[y] + if !col.isAutoIncr && !col.Transient { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + 
s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } + } + + s.WriteString(" where ") + for y := range t.keys { + col := t.keys[y] + if y > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.argFields = append(plan.argFields, col.fieldName) + plan.keyFields = append(plan.keyFields, col.fieldName) + x++ + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.updatePlan = plan + } + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { + plan := t.deletePlan + if plan.query == "" { + + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + for y := range t.Columns { + col := t.Columns[y] + if !col.Transient { + if col == t.version { + plan.versField = col.fieldName + } + } + } + + s.WriteString(" where ") + for x := range t.keys { + k := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, k.fieldName) + plan.argFields = append(plan.argFields, k.fieldName) + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) + + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.deletePlan = plan + } + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindGet() bindPlan { + plan := t.getPlan + if plan.query == "" { + + s := bytes.Buffer{} + s.WriteString("select ") + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + plan.argFields = append(plan.argFields, col.fieldName) + x++ + } + } + s.WriteString(" from ") + s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) + s.WriteString(" where ") + for x := range t.keys { + col := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, col.fieldName) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.getPlan = plan + } + + return plan +} + +// ColumnMap represents a mapping between a Go struct field and a single +// column in a table. +// Unique and MaxSize only inform the +// CreateTables() function and are not used by Insert/Update/Delete/Get. 
+type ColumnMap struct { + // Column name in db table + ColumnName string + + // If true, this column is skipped in generated SQL statements + Transient bool + + // If true, " unique" is added to create table statements. + // Not used elsewhere + Unique bool + + // Passed to Dialect.ToSqlType() to assist in informing the + // correct column type to map to in CreateTables() + // Not used elsewhere + MaxSize int + + fieldName string + gotype reflect.Type + isPK bool + isAutoIncr bool + isNotNull bool +} + +// Rename allows you to specify the column name in the table +// +// Example: table.ColMap("Updated").Rename("date_updated") +// +func (c *ColumnMap) Rename(colname string) *ColumnMap { + c.ColumnName = colname + return c +} + +// SetTransient allows you to mark the column as transient. If true +// this column will be skipped when SQL statements are generated +func (c *ColumnMap) SetTransient(b bool) *ColumnMap { + c.Transient = b + return c +} + +// SetUnique adds "unique" to the create table statements for this +// column, if b is true. +func (c *ColumnMap) SetUnique(b bool) *ColumnMap { + c.Unique = b + return c +} + +// SetNotNull adds "not null" to the create table statements for this +// column, if nn is true. +func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { + c.isNotNull = nn + return c +} + +// SetMaxSize specifies the max length of values of this column. This is +// passed to the dialect.ToSqlType() function, which can use the value +// to alter the generated type for "create table" statements +func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { + c.MaxSize = size + return c +} + +// Transaction represents a database transaction. +// Insert/Update/Delete/Get/Exec operations will be run in the context +// of that transaction. Transactions should be terminated with +// a call to Commit() or Rollback() +type Transaction struct { + dbmap *DbMap + tx *sql.Tx + closed bool +} + +// SqlExecutor exposes gorp operations that can be run from Pre/Post +// hooks. This hides whether the current operation that triggered the +// hook is in a transaction. +// +// See the DbMap function docs for each of the functions below for more +// information. +type SqlExecutor interface { + Get(i interface{}, keys ...interface{}) (interface{}, error) + Insert(list ...interface{}) error + Update(list ...interface{}) (int64, error) + Delete(list ...interface{}) (int64, error) + Exec(query string, args ...interface{}) (sql.Result, error) + Select(i interface{}, query string, + args ...interface{}) ([]interface{}, error) + SelectInt(query string, args ...interface{}) (int64, error) + SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) + SelectFloat(query string, args ...interface{}) (float64, error) + SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) + SelectStr(query string, args ...interface{}) (string, error) + SelectNullStr(query string, args ...interface{}) (sql.NullString, error) + SelectOne(holder interface{}, query string, args ...interface{}) error + query(query string, args ...interface{}) (*sql.Rows, error) + queryRow(query string, args ...interface{}) *sql.Row +} + +// Compile-time check that DbMap and Transaction implement the SqlExecutor +// interface. +var _, _ SqlExecutor = &DbMap{}, &Transaction{} + +type GorpLogger interface { + Printf(format string, v ...interface{}) +} + +// TraceOn turns on SQL statement logging for this DbMap. After this is +// called, all SQL statements will be sent to the logger. 
If prefix is
+// a non-empty string, it will be written to the front of all logged
+// strings, which can aid in filtering log lines.
+//
+// Use TraceOn if you want to spy on the SQL statements that gorp
+// generates.
+//
+// Note that the base log.Logger type satisfies GorpLogger, but adapters can
+// easily be written for other logging packages (e.g., the golang-sanctioned
+// glog framework).
+func (m *DbMap) TraceOn(prefix string, logger GorpLogger) {
+    m.logger = logger
+    if prefix == "" {
+        m.logPrefix = prefix
+    } else {
+        m.logPrefix = fmt.Sprintf("%s ", prefix)
+    }
+}
+
+// TraceOff turns off tracing. It is idempotent.
+func (m *DbMap) TraceOff() {
+    m.logger = nil
+    m.logPrefix = ""
+}
+
+// AddTable registers the given interface type with gorp. The table name
+// defaults to the name of TypeOf(i). You must call this function,
+// or AddTableWithName, for any struct type you wish to persist with
+// the given DbMap.
+//
+// This operation is idempotent. If i's type is already mapped, the
+// existing *TableMap is returned.
+func (m *DbMap) AddTable(i interface{}) *TableMap {
+    return m.AddTableWithName(i, "")
+}
+
+// AddTableWithName has the same behavior as AddTable, but sets
+// table.TableName to name.
+func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap {
+    return m.AddTableWithNameAndSchema(i, "", name)
+}
+
+// AddTableWithNameAndSchema has the same behavior as AddTable, but sets
+// table.TableName to name and table.SchemaName to schema.
+func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap {
+    t := reflect.TypeOf(i)
+    if name == "" {
+        name = t.Name()
+    }
+
+    // check if we have a table for this type already
+    // if so, update the name and return the existing pointer
+    for i := range m.tables {
+        table := m.tables[i]
+        if table.gotype == t {
+            table.TableName = name
+            return table
+        }
+    }
+
+    tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m}
+    tmap.Columns, tmap.version = m.readStructColumns(t)
+    m.tables = append(m.tables, tmap)
+
+    return tmap
+}
+
+func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap, version *ColumnMap) {
+    n := t.NumField()
+    for i := 0; i < n; i++ {
+        f := t.Field(i)
+        if f.Anonymous && f.Type.Kind() == reflect.Struct {
+            // Recursively add nested fields in embedded structs.
+            subcols, subversion := m.readStructColumns(f.Type)
+            // Don't append nested fields that have the same field
+            // name as an already-mapped field.
+            for _, subcol := range subcols {
+                shouldAppend := true
+                for _, col := range cols {
+                    if !subcol.Transient && subcol.fieldName == col.fieldName {
+                        shouldAppend = false
+                        break
+                    }
+                }
+                if shouldAppend {
+                    cols = append(cols, subcol)
+                }
+            }
+            if subversion != nil {
+                version = subversion
+            }
+        } else {
+            columnName := f.Tag.Get("db")
+            if columnName == "" {
+                columnName = f.Name
+            }
+            gotype := f.Type
+            if m.TypeConverter != nil {
+                // Make a new pointer to a value of type gotype and
+                // pass it to the TypeConverter's FromDb method to see
+                // if a different type should be used for the column
+                // type during table creation.
+                value := reflect.New(gotype).Interface()
+                scanner, useHolder := m.TypeConverter.FromDb(value)
+                if useHolder {
+                    gotype = reflect.TypeOf(scanner.Holder)
+                }
+            }
+            cm := &ColumnMap{
+                ColumnName: columnName,
+                Transient:  columnName == "-",
+                fieldName:  f.Name,
+                gotype:     gotype,
+            }
+            // Check for nested fields of the same field name and
+            // override them.
+            shouldAppend := true
+            for index, col := range cols {
+                if !col.Transient && col.fieldName == cm.fieldName {
+                    cols[index] = cm
+                    shouldAppend = false
+                    break
+                }
+            }
+            if shouldAppend {
+                cols = append(cols, cm)
+            }
+            if cm.fieldName == "Version" {
+                log.New(os.Stderr, "", log.LstdFlags).Println("Warning: Automatic mapping of Version struct members to version columns (see optimistic locking) will be deprecated in next version (V2) See: https://github.com/go-gorp/gorp/pull/214")
+                version = cm
+            }
+        }
+    }
+    return
+}
+
+// CreateTables iterates through TableMaps registered to this DbMap and
+// executes "create table" statements against the database for each.
+//
+// This is particularly useful in unit tests where you want to create
+// and destroy the schema automatically.
+func (m *DbMap) CreateTables() error {
+    return m.createTables(false)
+}
+
+// CreateTablesIfNotExists is similar to CreateTables, but starts
+// each statement with "create table if not exists" so that existing
+// tables do not raise errors.
+func (m *DbMap) CreateTablesIfNotExists() error {
+    return m.createTables(true)
+}
+
+func (m *DbMap) createTables(ifNotExists bool) error {
+    var err error
+    for i := range m.tables {
+        table := m.tables[i]
+
+        s := bytes.Buffer{}
+
+        if strings.TrimSpace(table.SchemaName) != "" {
+            schemaCreate := "create schema"
+            if ifNotExists {
+                s.WriteString(m.Dialect.IfSchemaNotExists(schemaCreate, table.SchemaName))
+            } else {
+                s.WriteString(schemaCreate)
+            }
+            s.WriteString(fmt.Sprintf(" %s;", table.SchemaName))
+        }
+
+        tableCreate := "create table"
+        if ifNotExists {
+            s.WriteString(m.Dialect.IfTableNotExists(tableCreate, table.SchemaName, table.TableName))
+        } else {
+            s.WriteString(tableCreate)
+        }
+        s.WriteString(fmt.Sprintf(" %s (", m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName)))
+
+        x := 0
+        for _, col := range table.Columns {
+            if !col.Transient {
+                if x > 0 {
+                    s.WriteString(", ")
+                }
+                stype := m.Dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr)
+                s.WriteString(fmt.Sprintf("%s %s", m.Dialect.QuoteField(col.ColumnName), stype))
+
+                if col.isPK || col.isNotNull {
+                    s.WriteString(" not null")
+                }
+                if col.isPK && len(table.keys) == 1 {
+                    s.WriteString(" primary key")
+                }
+                if col.Unique {
+                    s.WriteString(" unique")
+                }
+                if col.isAutoIncr {
+                    s.WriteString(fmt.Sprintf(" %s", m.Dialect.AutoIncrStr()))
+                }
+
+                x++
+            }
+        }
+        if len(table.keys) > 1 {
+            s.WriteString(", primary key (")
+            for x := range table.keys {
+                if x > 0 {
+                    s.WriteString(", ")
+                }
+                s.WriteString(m.Dialect.QuoteField(table.keys[x].ColumnName))
+            }
+            s.WriteString(")")
+        }
+        if len(table.uniqueTogether) > 0 {
+            for _, columns := range table.uniqueTogether {
+                s.WriteString(", unique (")
+                for i, column := range columns {
+                    if i > 0 {
+                        s.WriteString(", ")
+                    }
+                    s.WriteString(m.Dialect.QuoteField(column))
+                }
+                s.WriteString(")")
+            }
+        }
+        s.WriteString(") ")
+        s.WriteString(m.Dialect.CreateTableSuffix())
+        s.WriteString(m.Dialect.QuerySuffix())
+        _, err = m.Exec(s.String())
+        if err != nil {
+            break
+        }
+    }
+    return err
+}
+
+// DropTable drops an individual table. Will return an error
+// if the table does not exist.
+func (m *DbMap) DropTable(table interface{}) error {
+    t := reflect.TypeOf(table)
+    return m.dropTable(t, false)
+}
+
+// DropTableIfExists drops an individual table. Will NOT return an error
+// if the table does not exist.
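+//
+// Illustrative usage (editor's sketch, not from upstream): tests often pair
+// schema creation and teardown like this:
+//
+//    if err := dbmap.CreateTablesIfNotExists(); err != nil {
+//        log.Fatal(err)
+//    }
+//    defer dbmap.DropTablesIfExists()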
+func (m *DbMap) DropTableIfExists(table interface{}) error {
+    t := reflect.TypeOf(table)
+    return m.dropTable(t, true)
+}
+
+// DropTables iterates through TableMaps registered to this DbMap and
+// executes "drop table" statements against the database for each.
+func (m *DbMap) DropTables() error {
+    return m.dropTables(false)
+}
+
+// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to
+// avoid errors for tables that do not exist.
+func (m *DbMap) DropTablesIfExists() error {
+    return m.dropTables(true)
+}
+
+// Goes through all the registered tables, dropping them one by one.
+// If an error is encountered, then it is returned and the rest of
+// the tables are not dropped.
+func (m *DbMap) dropTables(addIfExists bool) (err error) {
+    for _, table := range m.tables {
+        err = m.dropTableImpl(table, addIfExists)
+        if err != nil {
+            return
+        }
+    }
+    return err
+}
+
+// Implementation of dropping a single table.
+func (m *DbMap) dropTable(t reflect.Type, addIfExists bool) error {
+    table := tableOrNil(m, t)
+    if table == nil {
+        // table is nil here, so report the type rather than dereferencing it.
+        return fmt.Errorf("gorp: table for type %v was not registered", t)
+    }
+
+    return m.dropTableImpl(table, addIfExists)
+}
+
+func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) {
+    tableDrop := "drop table"
+    if ifExists {
+        tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName)
+    }
+    _, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName)))
+    return err
+}
+
+// TruncateTables iterates through TableMaps registered to this DbMap and
+// executes "truncate table" statements against the database for each, or in the case of
+// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization
+// (http://www.sqlite.org/lang_delete.html)
+func (m *DbMap) TruncateTables() error {
+    var err error
+    for i := range m.tables {
+        table := m.tables[i]
+        _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName)))
+        if e != nil {
+            err = e
+        }
+    }
+    return err
+}
+
+// Insert runs a SQL INSERT statement for each element in list. List
+// items must be pointers.
+//
+// Any interface whose TableMap has an auto-increment primary key will
+// have its last insert id bound to the PK field on the struct.
+//
+// The hook functions PreInsert() and/or PostInsert() will be executed
+// before/after the INSERT statement if the interface defines them.
+//
+// Panics if any interface in the list has not been registered with AddTable.
+func (m *DbMap) Insert(list ...interface{}) error {
+    return insert(m, m, list...)
+}
+
+// Update runs a SQL UPDATE statement for each element in list. List
+// items must be pointers.
+//
+// The hook functions PreUpdate() and/or PostUpdate() will be executed
+// before/after the UPDATE statement if the interface defines them.
+//
+// Returns the number of rows updated.
+//
+// Returns an error if SetKeys has not been called on the TableMap.
+// Panics if any interface in the list has not been registered with AddTable.
+func (m *DbMap) Update(list ...interface{}) (int64, error) {
+    return update(m, m, list...)
+}
+
+// Delete runs a SQL DELETE statement for each element in list. List
+// items must be pointers.
+//
+// The hook functions PreDelete() and/or PostDelete() will be executed
+// before/after the DELETE statement if the interface defines them.
+//
+// Returns the number of rows deleted.
+//
+// Returns an error if SetKeys has not been called on the TableMap.
+// Panics if any interface in the list has not been registered with AddTable.
+func (m *DbMap) Delete(list ...interface{}) (int64, error) {
+    return delete(m, m, list...)
+}
+
+// Get runs a SQL SELECT to fetch a single row from the table based on the
+// primary key(s).
+//
+// i should be an empty value for the struct to load. keys should be
+// the primary key value(s) for the row to load. If multiple keys
+// exist on the table, the order should match the column order
+// specified in SetKeys() when the table mapping was defined.
+//
+// The hook function PostGet() will be executed after the SELECT
+// statement if the interface defines it.
+//
+// Returns a pointer to a struct that matches or nil if no row is found.
+//
+// Returns an error if SetKeys has not been called on the TableMap.
+// Panics if any interface in the list has not been registered with AddTable.
+func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) {
+    return get(m, m, i, keys...)
+}
+
+// Select runs an arbitrary SQL query, binding the columns in the result
+// to fields on the struct specified by i. args represent the bind
+// parameters for the SQL statement.
+//
+// Column names on the SELECT statement should be aliased to the field names
+// on the struct i. Returns an error if one or more columns in the result
+// do not match. It is OK if fields on i are not part of the SQL
+// statement.
+//
+// The hook function PostGet() will be executed after the SELECT
+// statement if the interface defines it.
+//
+// Values are returned in one of two ways:
+// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to
+// matching rows of type i.
+// 2. If i is a pointer to a slice, the results will be appended to that slice
+// and nil returned.
+//
+// i does NOT need to be registered with AddTable().
+func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) {
+    return hookedselect(m, m, i, query, args...)
+}
+
+// Exec runs an arbitrary SQL statement. args represent the bind parameters.
+// This is equivalent to running Exec() directly on the underlying *sql.DB.
+func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) {
+    m.trace(query, args...)
+    return m.Db.Exec(query, args...)
+}
+
+// SelectInt is a convenience wrapper around the gorp.SelectInt function.
+func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) {
+    return SelectInt(m, query, args...)
+}
+
+// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function.
+func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) {
+    return SelectNullInt(m, query, args...)
+}
+
+// SelectFloat is a convenience wrapper around the gorp.SelectFloat function.
+func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) {
+    return SelectFloat(m, query, args...)
+}
+
+// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function.
+func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) {
+    return SelectNullFloat(m, query, args...)
+}
+
+// SelectStr is a convenience wrapper around the gorp.SelectStr function.
+func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) {
+    return SelectStr(m, query, args...)
+}
+
+// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function.
+func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) {
+    return SelectNullStr(m, query, args...)
+}
+
+// SelectOne is a convenience wrapper around the gorp.SelectOne function.
+func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error {
+    return SelectOne(m, m, holder, query, args...)
+}
+
+// Begin starts a gorp Transaction.
+func (m *DbMap) Begin() (*Transaction, error) {
+    m.trace("begin;")
+    tx, err := m.Db.Begin()
+    if err != nil {
+        return nil, err
+    }
+    return &Transaction{m, tx, false}, nil
+}
+
+// TableFor returns the *TableMap corresponding to the given Go Type.
+// If no table is mapped to that type an error is returned.
+// If checkPK is true and the mapped table has no registered PKs, an error is returned.
+func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) {
+    table := tableOrNil(m, t)
+    if table == nil {
+        return nil, fmt.Errorf("No table found for type: %v", t.Name())
+    }
+
+    if checkPK && len(table.keys) < 1 {
+        e := fmt.Sprintf("gorp: No keys defined for table: %s",
+            table.TableName)
+        return nil, errors.New(e)
+    }
+
+    return table, nil
+}
+
+// Prepare creates a prepared statement for later queries or executions.
+// Multiple queries or executions may be run concurrently from the returned statement.
+// This is equivalent to running Prepare() directly on the underlying *sql.DB.
+func (m *DbMap) Prepare(query string) (*sql.Stmt, error) {
+    m.trace(query, nil)
+    return m.Db.Prepare(query)
+}
+
+func tableOrNil(m *DbMap, t reflect.Type) *TableMap {
+    for i := range m.tables {
+        table := m.tables[i]
+        if table.gotype == t {
+            return table
+        }
+    }
+    return nil
+}
+
+func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) {
+    ptrv := reflect.ValueOf(ptr)
+    if ptrv.Kind() != reflect.Ptr {
+        e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr,
+            ptrv.Kind())
+        return nil, reflect.Value{}, errors.New(e)
+    }
+    elem := ptrv.Elem()
+    etype := reflect.TypeOf(elem.Interface())
+    t, err := m.TableFor(etype, checkPK)
+    if err != nil {
+        return nil, reflect.Value{}, err
+    }
+
+    return t, elem, nil
+}
+
+func (m *DbMap) queryRow(query string, args ...interface{}) *sql.Row {
+    m.trace(query, args...)
+    return m.Db.QueryRow(query, args...)
+}
+
+func (m *DbMap) query(query string, args ...interface{}) (*sql.Rows, error) {
+    m.trace(query, args...)
+    return m.Db.Query(query, args...)
+}
+
+func (m *DbMap) trace(query string, args ...interface{}) {
+    if m.logger != nil {
+        var margs = argsString(args...)
+        m.logger.Printf("%s%s [%s]", m.logPrefix, query, margs)
+    }
+}
+
+func argsString(args ...interface{}) string {
+    var margs string
+    for i, a := range args {
+        var v interface{} = a
+        if x, ok := v.(driver.Valuer); ok {
+            y, err := x.Value()
+            if err == nil {
+                v = y
+            }
+        }
+        switch v.(type) {
+        case string:
+            v = fmt.Sprintf("%q", v)
+        default:
+            v = fmt.Sprintf("%v", v)
+        }
+        margs += fmt.Sprintf("%d:%s", i+1, v)
+        if i+1 < len(args) {
+            margs += " "
+        }
+    }
+    return margs
+}
+
+///////////////
+
+// Insert has the same behavior as DbMap.Insert(), but runs in a transaction.
+func (t *Transaction) Insert(list ...interface{}) error {
+    return insert(t.dbmap, t, list...)
+}
+
+// Update has the same behavior as DbMap.Update(), but runs in a transaction.
+func (t *Transaction) Update(list ...interface{}) (int64, error) {
+    return update(t.dbmap, t, list...)
+} + +// Delete has the same behavior as DbMap.Delete(), but runs in a transaction. +func (t *Transaction) Delete(list ...interface{}) (int64, error) { + return delete(t.dbmap, t, list...) +} + +// Get has the same behavior as DbMap.Get(), but runs in a transaction. +func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(t.dbmap, t, i, keys...) +} + +// Select has the same behavior as DbMap.Select(), but runs in a transaction. +func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + return hookedselect(t.dbmap, t, i, query, args...) +} + +// Exec has the same behavior as DbMap.Exec(), but runs in a transaction. +func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) { + t.dbmap.trace(query, args...) + return t.tx.Exec(query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function. +func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) { + return SelectInt(t, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function. +func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + return SelectNullInt(t, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFloat function. +func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(t, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function. +func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(t, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function. +func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(t, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function. +func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + return SelectNullStr(t, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function. +func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error { + return SelectOne(t.dbmap, t, holder, query, args...) +} + +// Commit commits the underlying database transaction. +func (t *Transaction) Commit() error { + if !t.closed { + t.closed = true + t.dbmap.trace("commit;") + return t.tx.Commit() + } + + return sql.ErrTxDone +} + +// Rollback rolls back the underlying database transaction. +func (t *Transaction) Rollback() error { + if !t.closed { + t.closed = true + t.dbmap.trace("rollback;") + return t.tx.Rollback() + } + + return sql.ErrTxDone +} + +// Savepoint creates a savepoint with the given name. The name is interpolated +// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is +// derived from user input. +func (t *Transaction) Savepoint(name string) error { + query := "savepoint " + t.dbmap.Dialect.QuoteField(name) + t.dbmap.trace(query, nil) + _, err := t.tx.Exec(query) + return err +} + +// RollbackToSavepoint rolls back to the savepoint with the given name. The +// name is interpolated directly into the SQL SAVEPOINT statement, so you must +// sanitize it if it is derived from user input. 
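+//
+// Illustrative sketch (editor's addition; assumes a database and dialect
+// with savepoint support, e.g. PostgreSQL; error handling abbreviated):
+//
+//    tx, err := dbmap.Begin()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    tx.Savepoint("sp1")
+//    if err := tx.Insert(&row); err != nil {
+//        tx.RollbackToSavepoint("sp1")
+//    }
+//    tx.Commit()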
+func (t *Transaction) RollbackToSavepoint(savepoint string) error {
+    query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
+    t.dbmap.trace(query, nil)
+    _, err := t.tx.Exec(query)
+    return err
+}
+
+// ReleaseSavepoint releases the savepoint with the given name. The name is
+// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize
+// it if it is derived from user input.
+func (t *Transaction) ReleaseSavepoint(savepoint string) error {
+    query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
+    t.dbmap.trace(query, nil)
+    _, err := t.tx.Exec(query)
+    return err
+}
+
+// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction.
+func (t *Transaction) Prepare(query string) (*sql.Stmt, error) {
+    t.dbmap.trace(query, nil)
+    return t.tx.Prepare(query)
+}
+
+func (t *Transaction) queryRow(query string, args ...interface{}) *sql.Row {
+    t.dbmap.trace(query, args...)
+    return t.tx.QueryRow(query, args...)
+}
+
+func (t *Transaction) query(query string, args ...interface{}) (*sql.Rows, error) {
+    t.dbmap.trace(query, args...)
+    return t.tx.Query(query, args...)
+}
+
+///////////////
+
+// SelectInt executes the given query, which should be a SELECT statement for a single
+// integer column, and returns the value of the first row returned. If no rows are
+// found, zero is returned.
+func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) {
+    var h int64
+    err := selectVal(e, &h, query, args...)
+    if err != nil && err != sql.ErrNoRows {
+        return 0, err
+    }
+    return h, nil
+}
+
+// SelectNullInt executes the given query, which should be a SELECT statement for a single
+// integer column, and returns the value of the first row returned. If no rows are
+// found, the empty sql.NullInt64 value is returned.
+func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) {
+    var h sql.NullInt64
+    err := selectVal(e, &h, query, args...)
+    if err != nil && err != sql.ErrNoRows {
+        return h, err
+    }
+    return h, nil
+}
+
+// SelectFloat executes the given query, which should be a SELECT statement for a single
+// float column, and returns the value of the first row returned. If no rows are
+// found, zero is returned.
+func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) {
+    var h float64
+    err := selectVal(e, &h, query, args...)
+    if err != nil && err != sql.ErrNoRows {
+        return 0, err
+    }
+    return h, nil
+}
+
+// SelectNullFloat executes the given query, which should be a SELECT statement for a single
+// float column, and returns the value of the first row returned. If no rows are
+// found, the empty sql.NullFloat64 value is returned.
+func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) {
+    var h sql.NullFloat64
+    err := selectVal(e, &h, query, args...)
+    if err != nil && err != sql.ErrNoRows {
+        return h, err
+    }
+    return h, nil
+}
+
+// SelectStr executes the given query, which should be a SELECT statement for a single
+// char/varchar column, and returns the value of the first row returned. If no rows are
+// found, an empty string is returned.
+func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) {
+    var h string
+    err := selectVal(e, &h, query, args...)
+ if err != nil && err != sql.ErrNoRows { + return "", err + } + return h, nil +} + +// SelectNullStr executes the given query, which should be a SELECT +// statement for a single char/varchar column, and returns the value +// of the first row returned. If no rows are found, the empty +// sql.NullString is returned. +func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) { + var h sql.NullString + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return h, err + } + return h, nil +} + +// SelectOne executes the given query (which should be a SELECT statement) +// and binds the result to holder, which must be a pointer. +// +// If no row is found, an error (sql.ErrNoRows specifically) will be returned +// +// If more than one row is found, an error will be returned. +// +func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error { + t := reflect.TypeOf(holder) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } else { + return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %t", holder) + } + + // Handle pointer to pointer + isptr := false + if t.Kind() == reflect.Ptr { + isptr = true + t = t.Elem() + } + + if t.Kind() == reflect.Struct { + var nonFatalErr error + + list, err := hookedselect(m, e, holder, query, args...) + if err != nil { + if !NonFatalError(err) { + return err + } + nonFatalErr = err + } + + dest := reflect.ValueOf(holder) + if isptr { + dest = dest.Elem() + } + + if list != nil && len(list) > 0 { + // check for multiple rows + if len(list) > 1 { + return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args) + } + + // Initialize if nil + if dest.IsNil() { + dest.Set(reflect.New(t)) + } + + // only one row found + src := reflect.ValueOf(list[0]) + dest.Elem().Set(src.Elem()) + } else { + // No rows found, return a proper error. + return sql.ErrNoRows + } + + return nonFatalErr + } + + return selectVal(e, holder, query, args...) +} + +func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error { + if len(args) == 1 { + switch m := e.(type) { + case *DbMap: + query, args = maybeExpandNamedQuery(m, query, args) + case *Transaction: + query, args = maybeExpandNamedQuery(m.dbmap, query, args) + } + } + rows, err := e.query(query, args...) + if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + return sql.ErrNoRows + } + + return rows.Scan(holder) +} + +/////////////// + +func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string, + args ...interface{}) ([]interface{}, error) { + + var nonFatalErr error + + list, err := rawselect(m, exec, i, query, args...) + if err != nil { + if !NonFatalError(err) { + return nil, err + } + nonFatalErr = err + } + + // Determine where the results are: written to i, or returned in list + if t, _ := toSliceType(i); t == nil { + for _, v := range list { + if v, ok := v.(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + } + } else { + resultsValue := reflect.Indirect(reflect.ValueOf(i)) + for i := 0; i < resultsValue.Len(); i++ { + if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + } + } + return list, nonFatalErr +} + +func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string, + args ...interface{}) ([]interface{}, error) { + var ( + appendToSlice = false // Write results to i directly? 
+        intoStruct      = true // Selecting into a struct?
+        pointerElements = true // Are the slice elements pointers (vs values)?
+    )
+
+    var nonFatalErr error
+
+    // get type for i, verifying it's a supported destination
+    t, err := toType(i)
+    if err != nil {
+        var err2 error
+        if t, err2 = toSliceType(i); t == nil {
+            if err2 != nil {
+                return nil, err2
+            }
+            return nil, err
+        }
+        pointerElements = t.Kind() == reflect.Ptr
+        if pointerElements {
+            t = t.Elem()
+        }
+        appendToSlice = true
+        intoStruct = t.Kind() == reflect.Struct
+    }
+
+    // If the caller supplied a single struct/map argument, assume a "named
+    // parameter" query. Extract the named arguments from the struct/map, create
+    // the flat arg slice, and rewrite the query to use the dialect's placeholder.
+    if len(args) == 1 {
+        query, args = maybeExpandNamedQuery(m, query, args)
+    }
+
+    // Run the query
+    rows, err := exec.query(query, args...)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+
+    // Fetch the column names as returned from db
+    cols, err := rows.Columns()
+    if err != nil {
+        return nil, err
+    }
+
+    if !intoStruct && len(cols) > 1 {
+        return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols))
+    }
+
+    var colToFieldIndex [][]int
+    if intoStruct {
+        if colToFieldIndex, err = columnToFieldIndex(m, t, cols); err != nil {
+            if !NonFatalError(err) {
+                return nil, err
+            }
+            nonFatalErr = err
+        }
+    }
+
+    conv := m.TypeConverter
+
+    // Add results to one of these two slices.
+    var (
+        list       = make([]interface{}, 0)
+        sliceValue = reflect.Indirect(reflect.ValueOf(i))
+    )
+
+    for {
+        if !rows.Next() {
+            // if an error occurred, return it
+            if rows.Err() != nil {
+                return nil, rows.Err()
+            }
+            // time to exit from outer "for" loop
+            break
+        }
+        v := reflect.New(t)
+        dest := make([]interface{}, len(cols))
+
+        custScan := make([]CustomScanner, 0)
+
+        for x := range cols {
+            f := v.Elem()
+            if intoStruct {
+                index := colToFieldIndex[x]
+                if index == nil {
+                    // this field is not present in the struct, so create a dummy
+                    // value for rows.Scan to scan into
+                    var dummy sql.RawBytes
+                    dest[x] = &dummy
+                    continue
+                }
+                f = f.FieldByIndex(index)
+            }
+            target := f.Addr().Interface()
+            if conv != nil {
+                scanner, ok := conv.FromDb(target)
+                if ok {
+                    target = scanner.Holder
+                    custScan = append(custScan, scanner)
+                }
+            }
+            dest[x] = target
+        }
+
+        err = rows.Scan(dest...)
+        if err != nil {
+            return nil, err
+        }
+
+        for _, c := range custScan {
+            err = c.Bind()
+            if err != nil {
+                return nil, err
+            }
+        }
+
+        if appendToSlice {
+            if !pointerElements {
+                v = v.Elem()
+            }
+            sliceValue.Set(reflect.Append(sliceValue, v))
+        } else {
+            list = append(list, v.Interface())
+        }
+    }
+
+    if appendToSlice && sliceValue.IsNil() {
+        sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0))
+    }
+
+    return list, nonFatalErr
+}
+
+// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used
+// as input to a named query. If so, it rewrites the query to use
+// dialect-dependent bindvars and instantiates the corresponding slice of
+// parameters by extracting data from the map / struct.
+// If not, returns the input values unchanged.
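+//
+// Illustrative example (editor's addition; the Post type is hypothetical):
+// a named query with a map argument:
+//
+//    var posts []Post
+//    _, err := dbmap.Select(&posts,
+//        "select * from post where author_id = :author",
+//        map[string]interface{}{"author": 42})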
+func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) { + arg := reflect.ValueOf(args[0]) + for arg.Kind() == reflect.Ptr { + arg = arg.Elem() + } + switch { + case arg.Kind() == reflect.Map && arg.Type().Key().Kind() == reflect.String: + return expandNamedQuery(m, query, func(key string) reflect.Value { + return arg.MapIndex(reflect.ValueOf(key)) + }) + // #84 - ignore time.Time structs here - there may be a cleaner way to do this + case arg.Kind() == reflect.Struct && !(arg.Type().PkgPath() == "time" && arg.Type().Name() == "Time"): + return expandNamedQuery(m, query, arg.FieldByName) + } + return query, args +} + +var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) + +// expandNamedQuery accepts a query with placeholders of the form ":key", and a +// single arg of Kind Struct or Map[string]. It returns the query with the +// dialect's placeholders, and a slice of args ready for positional insertion +// into the query. +func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { + var ( + n int + args []interface{} + ) + return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { + val := keyGetter(key[1:]) + if !val.IsValid() { + return key + } + args = append(args, val.Interface()) + newVar := m.Dialect.BindVar(n) + n++ + return newVar + }), args +} + +func columnToFieldIndex(m *DbMap, t reflect.Type, cols []string) ([][]int, error) { + colToFieldIndex := make([][]int, len(cols)) + + // check if type t is a mapped table - if so we'll + // check the table for column aliasing below + tableMapped := false + table := tableOrNil(m, t) + if table != nil { + tableMapped = true + } + + // Loop over column names and find field in i to bind to + // based on column name. all returned columns must match + // a field in the i struct + missingColNames := []string{} + for x := range cols { + colName := strings.ToLower(cols[x]) + field, found := t.FieldByNameFunc(func(fieldName string) bool { + field, _ := t.FieldByName(fieldName) + fieldName = field.Tag.Get("db") + + if fieldName == "-" { + return false + } else if fieldName == "" { + fieldName = field.Name + } + if tableMapped { + colMap := colMapOrNil(table, fieldName) + if colMap != nil { + fieldName = colMap.ColumnName + } + } + return colName == strings.ToLower(fieldName) + }) + if found { + colToFieldIndex[x] = field.Index + } + if colToFieldIndex[x] == nil { + missingColNames = append(missingColNames, colName) + } + } + if len(missingColNames) > 0 { + return colToFieldIndex, &NoFieldInTypeError{ + TypeName: t.Name(), + MissingColNames: missingColNames, + } + } + return colToFieldIndex, nil +} + +func fieldByName(val reflect.Value, fieldName string) *reflect.Value { + // try to find field by exact match + f := val.FieldByName(fieldName) + + if f != zeroVal { + return &f + } + + // try to find by case insensitive match - only the Postgres driver + // seems to require this - in the case where columns are aliased in the sql + fieldNameL := strings.ToLower(fieldName) + fieldCount := val.NumField() + t := val.Type() + for i := 0; i < fieldCount; i++ { + sf := t.Field(i) + if strings.ToLower(sf.Name) == fieldNameL { + f := val.Field(i) + return &f + } + } + + return nil +} + +// toSliceType returns the element type of the given object, if the object is a +// "*[]*Element" or "*[]Element". If not, returns nil. +// err is returned if the user was trying to pass a pointer-to-slice but failed. 
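+//
+// For illustration (editor's addition; Post is hypothetical), both slice
+// destinations below are accepted by Select:
+//
+//    var a []*Post
+//    dbmap.Select(&a, "select * from post") // pointer elements
+//    var b []Post
+//    dbmap.Select(&b, "select * from post") // value elements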
+func toSliceType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + if t.Kind() != reflect.Ptr { + // If it's a slice, return a more helpful error message + if t.Kind() == reflect.Slice { + return nil, fmt.Errorf("gorp: Cannot SELECT into a non-pointer slice: %v", t) + } + return nil, nil + } + if t = t.Elem(); t.Kind() != reflect.Slice { + return nil, nil + } + return t.Elem(), nil +} + +func toType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + + // If a Pointer to a type, follow + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("gorp: Cannot SELECT into this type: %v", reflect.TypeOf(i)) + } + return t, nil +} + +func get(m *DbMap, exec SqlExecutor, i interface{}, + keys ...interface{}) (interface{}, error) { + + t, err := toType(i) + if err != nil { + return nil, err + } + + table, err := m.TableFor(t, true) + if err != nil { + return nil, err + } + + plan := table.bindGet() + + v := reflect.New(t) + dest := make([]interface{}, len(plan.argFields)) + + conv := m.TypeConverter + custScan := make([]CustomScanner, 0) + + for x, fieldName := range plan.argFields { + f := v.Elem().FieldByName(fieldName) + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + row := exec.queryRow(plan.query, keys...) + err = row.Scan(dest...) + if err != nil { + if err == sql.ErrNoRows { + err = nil + } + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if v, ok := v.Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + + return v.Interface(), nil +} + +func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreDelete); ok { + err = v.PreDelete(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindDelete(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + count += rows + + if v, ok := eval.(HasPostDelete); ok { + err := v.PostDelete(exec) + if err != nil { + return -1, err + } + } + } + + return count, nil +} + +func update(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreUpdate); ok { + err = v.PreUpdate(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindUpdate(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) 
+		}
+
+		if bi.versField != "" {
+			elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1)
+		}
+
+		count += rows
+
+		if v, ok := eval.(HasPostUpdate); ok {
+			err = v.PostUpdate(exec)
+			if err != nil {
+				return -1, err
+			}
+		}
+	}
+	return count, nil
+}
+
+func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error {
+	for _, ptr := range list {
+		table, elem, err := m.tableForPointer(ptr, false)
+		if err != nil {
+			return err
+		}
+
+		eval := elem.Addr().Interface()
+		if v, ok := eval.(HasPreInsert); ok {
+			err := v.PreInsert(exec)
+			if err != nil {
+				return err
+			}
+		}
+
+		bi, err := table.bindInsert(elem)
+		if err != nil {
+			return err
+		}
+
+		if bi.autoIncrIdx > -1 {
+			f := elem.FieldByName(bi.autoIncrFieldName)
+			switch inserter := m.Dialect.(type) {
+			case IntegerAutoIncrInserter:
+				id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...)
+				if err != nil {
+					return err
+				}
+				k := f.Kind()
+				if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) {
+					f.SetInt(id)
+				} else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) {
+					f.SetUint(uint64(id))
+				} else {
+					return fmt.Errorf("gorp: Cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName)
+				}
+			case TargetedAutoIncrInserter:
+				err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...)
+				if err != nil {
+					return err
+				}
+			default:
+				return fmt.Errorf("gorp: Cannot use autoincrement fields on dialects that do not implement an autoincrementing interface")
+			}
+		} else {
+			_, err := exec.Exec(bi.query, bi.args...)
+			if err != nil {
+				return err
+			}
+		}
+
+		if v, ok := eval.(HasPostInsert); ok {
+			err := v.PostInsert(exec)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func lockError(m *DbMap, exec SqlExecutor, tableName string,
+	existingVer int64, elem reflect.Value,
+	keys ...interface{}) (int64, error) {
+
+	existing, err := get(m, exec, elem.Interface(), keys...)
+	if err != nil {
+		return -1, err
+	}
+
+	ole := OptimisticLockError{tableName, keys, true, existingVer}
+	if existing == nil {
+		ole.RowExists = false
+	}
+	return -1, ole
+}
+
+// PostGet() will be executed after the GET statement.
+type HasPostGet interface {
+	PostGet(SqlExecutor) error
+}
+
+// PostDelete() will be executed after the DELETE statement.
+type HasPostDelete interface {
+	PostDelete(SqlExecutor) error
+}
+
+// PostUpdate() will be executed after the UPDATE statement.
+type HasPostUpdate interface {
+	PostUpdate(SqlExecutor) error
+}
+
+// PostInsert() will be executed after the INSERT statement.
+type HasPostInsert interface {
+	PostInsert(SqlExecutor) error
+}
+
+// PreDelete() will be executed before the DELETE statement.
+type HasPreDelete interface {
+	PreDelete(SqlExecutor) error
+}
+
+// PreUpdate() will be executed before the UPDATE statement.
+type HasPreUpdate interface {
+	PreUpdate(SqlExecutor) error
+}
+
+// PreInsert() will be executed before the INSERT statement.
+type HasPreInsert interface { + PreInsert(SqlExecutor) error +} diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/gorp_test.go b/Godeps/_workspace/src/gopkg.in/gorp.v1/gorp_test.go new file mode 100644 index 00000000..55182371 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/gorp_test.go @@ -0,0 +1,2083 @@ +package gorp + +import ( + "bytes" + "database/sql" + "encoding/json" + "errors" + "fmt" + _ "github.com/go-sql-driver/mysql" + _ "github.com/lib/pq" + _ "github.com/mattn/go-sqlite3" + _ "github.com/ziutek/mymysql/godrv" + "log" + "math/rand" + "os" + "reflect" + "strings" + "testing" + "time" +) + +// verify interface compliance +var _ Dialect = SqliteDialect{} +var _ Dialect = PostgresDialect{} +var _ Dialect = MySQLDialect{} +var _ Dialect = SqlServerDialect{} +var _ Dialect = OracleDialect{} + +type testable interface { + GetId() int64 + Rand() +} + +type Invoice struct { + Id int64 + Created int64 + Updated int64 + Memo string + PersonId int64 + IsPaid bool +} + +func (me *Invoice) GetId() int64 { return me.Id } +func (me *Invoice) Rand() { + me.Memo = fmt.Sprintf("random %d", rand.Int63()) + me.Created = rand.Int63() + me.Updated = rand.Int63() +} + +type InvoiceTag struct { + Id int64 `db:"myid"` + Created int64 `db:"myCreated"` + Updated int64 `db:"date_updated"` + Memo string + PersonId int64 `db:"person_id"` + IsPaid bool `db:"is_Paid"` +} + +func (me *InvoiceTag) GetId() int64 { return me.Id } +func (me *InvoiceTag) Rand() { + me.Memo = fmt.Sprintf("random %d", rand.Int63()) + me.Created = rand.Int63() + me.Updated = rand.Int63() +} + +// See: https://github.com/coopernurse/gorp/issues/175 +type AliasTransientField struct { + Id int64 `db:"id"` + Bar int64 `db:"-"` + BarStr string `db:"bar"` +} + +func (me *AliasTransientField) GetId() int64 { return me.Id } +func (me *AliasTransientField) Rand() { + me.BarStr = fmt.Sprintf("random %d", rand.Int63()) +} + +type OverriddenInvoice struct { + Invoice + Id string +} + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string + Version int64 +} + +type FNameOnly struct { + FName string +} + +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string + LegacyVersion int64 +} + +type TableWithNull struct { + Id int64 + Str sql.NullString + Int64 sql.NullInt64 + Float64 sql.NullFloat64 + Bool sql.NullBool + Bytes []byte +} + +type WithIgnoredColumn struct { + internal int64 `db:"-"` + Id int64 + Created int64 +} + +type IdCreated struct { + Id int64 + Created int64 +} + +type IdCreatedExternal struct { + IdCreated + External int64 +} + +type WithStringPk struct { + Id string + Name string +} + +type CustomStringType string + +type TypeConversionExample struct { + Id int64 + PersonJSON Person + Name CustomStringType +} + +type PersonUInt32 struct { + Id uint32 + Name string +} + +type PersonUInt64 struct { + Id uint64 + Name string +} + +type PersonUInt16 struct { + Id uint16 + Name string +} + +type WithEmbeddedStruct struct { + Id int64 + Names +} + +type WithEmbeddedStructBeforeAutoincrField struct { + Names + Id int64 +} + +type WithEmbeddedAutoincr struct { + WithEmbeddedStruct + MiddleName string +} + +type Names struct { + FirstName string + LastName string +} + +type UniqueColumns struct { + FirstName string + LastName string + City string + ZipCode int64 +} + +type SingleColumnTable struct { + SomeId string +} + +type CustomDate struct { + time.Time +} + +type WithCustomDate struct { + Id int64 + Added CustomDate +} + +type 
testTypeConverter struct{} + +func (me testTypeConverter) ToDb(val interface{}) (interface{}, error) { + + switch t := val.(type) { + case Person: + b, err := json.Marshal(t) + if err != nil { + return "", err + } + return string(b), nil + case CustomStringType: + return string(t), nil + case CustomDate: + return t.Time, nil + } + + return val, nil +} + +func (me testTypeConverter) FromDb(target interface{}) (CustomScanner, bool) { + switch target.(type) { + case *Person: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New("FromDb: Unable to convert Person to *string") + } + b := []byte(*s) + return json.Unmarshal(b, target) + } + return CustomScanner{new(string), target, binder}, true + case *CustomStringType: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New("FromDb: Unable to convert CustomStringType to *string") + } + st, ok := target.(*CustomStringType) + if !ok { + return errors.New(fmt.Sprint("FromDb: Unable to convert target to *CustomStringType: ", reflect.TypeOf(target))) + } + *st = CustomStringType(*s) + return nil + } + return CustomScanner{new(string), target, binder}, true + case *CustomDate: + binder := func(holder, target interface{}) error { + t, ok := holder.(*time.Time) + if !ok { + return errors.New("FromDb: Unable to convert CustomDate to *time.Time") + } + dateTarget, ok := target.(*CustomDate) + if !ok { + return errors.New(fmt.Sprint("FromDb: Unable to convert target to *CustomDate: ", reflect.TypeOf(target))) + } + dateTarget.Time = *t + return nil + } + return CustomScanner{new(time.Time), target, binder}, true + } + + return CustomScanner{}, false +} + +func (p *Person) PreInsert(s SqlExecutor) error { + p.Created = time.Now().UnixNano() + p.Updated = p.Created + if p.FName == "badname" { + return fmt.Errorf("Invalid name: %s", p.FName) + } + return nil +} + +func (p *Person) PostInsert(s SqlExecutor) error { + p.LName = "postinsert" + return nil +} + +func (p *Person) PreUpdate(s SqlExecutor) error { + p.FName = "preupdate" + return nil +} + +func (p *Person) PostUpdate(s SqlExecutor) error { + p.LName = "postupdate" + return nil +} + +func (p *Person) PreDelete(s SqlExecutor) error { + p.FName = "predelete" + return nil +} + +func (p *Person) PostDelete(s SqlExecutor) error { + p.LName = "postdelete" + return nil +} + +func (p *Person) PostGet(s SqlExecutor) error { + p.LName = "postget" + return nil +} + +type PersistentUser struct { + Key int32 + Id string + PassedTraining bool +} + +func TestCreateTablesIfNotExists(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + err := dbmap.CreateTablesIfNotExists() + if err != nil { + t.Error(err) + } +} + +func TestTruncateTables(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + err := dbmap.CreateTablesIfNotExists() + if err != nil { + t.Error(err) + } + + // Insert some data + p1 := &Person{0, 0, 0, "Bob", "Smith", 0} + dbmap.Insert(p1) + inv := &Invoice{0, 0, 1, "my invoice", 0, true} + dbmap.Insert(inv) + + err = dbmap.TruncateTables() + if err != nil { + t.Error(err) + } + + // Make sure all rows are deleted + rows, _ := dbmap.Select(Person{}, "SELECT * FROM person_test") + if len(rows) != 0 { + t.Errorf("Expected 0 person rows, got %d", len(rows)) + } + rows, _ = dbmap.Select(Invoice{}, "SELECT * FROM invoice_test") + if len(rows) != 0 { + t.Errorf("Expected 0 invoice rows, got %d", len(rows)) + } +} + +func TestCustomDateType(t *testing.T) { + dbmap := 
newDbMap()
+	dbmap.TypeConverter = testTypeConverter{}
+	dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds))
+	dbmap.AddTable(WithCustomDate{}).SetKeys(true, "Id")
+	err := dbmap.CreateTables()
+	if err != nil {
+		panic(err)
+	}
+	defer dropAndClose(dbmap)
+
+	test1 := &WithCustomDate{Added: CustomDate{Time: time.Now().Truncate(time.Second)}}
+	err = dbmap.Insert(test1)
+	if err != nil {
+		t.Errorf("Could not insert struct with custom date field: %s", err)
+		t.FailNow()
+	}
+	// Unfortunately, the mysql driver doesn't handle time.Time
+	// values properly during Get(). I can't find a way to work
+	// around that problem - every other type that I've tried is just
+	// silently converted. time.Time is the only type that causes
+	// the issue that this test checks for. As such, if the driver is
+	// mysql, we'll just skip the rest of this test.
+	if _, driver := dialectAndDriver(); driver == "mysql" {
+		t.Skip("TestCustomDateType can't run Get() with the mysql driver; skipping the rest of this test...")
+	}
+	result, err := dbmap.Get(new(WithCustomDate), test1.Id)
+	if err != nil {
+		t.Errorf("Could not get struct with custom date field: %s", err)
+		t.FailNow()
+	}
+	test2 := result.(*WithCustomDate)
+	if test2.Added.UTC() != test1.Added.UTC() {
+		t.Errorf("Custom dates do not match: %v != %v", test2.Added.UTC(), test1.Added.UTC())
+	}
+}
+
+func TestUIntPrimaryKey(t *testing.T) {
+	dbmap := newDbMap()
+	dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds))
+	dbmap.AddTable(PersonUInt64{}).SetKeys(true, "Id")
+	dbmap.AddTable(PersonUInt32{}).SetKeys(true, "Id")
+	dbmap.AddTable(PersonUInt16{}).SetKeys(true, "Id")
+	err := dbmap.CreateTablesIfNotExists()
+	if err != nil {
+		panic(err)
+	}
+	defer dropAndClose(dbmap)
+
+	p1 := &PersonUInt64{0, "name1"}
+	p2 := &PersonUInt32{0, "name2"}
+	p3 := &PersonUInt16{0, "name3"}
+	err = dbmap.Insert(p1, p2, p3)
+	if err != nil {
+		t.Error(err)
+	}
+	if p1.Id != 1 {
+		t.Errorf("%d != 1", p1.Id)
+	}
+	if p2.Id != 1 {
+		t.Errorf("%d != 1", p2.Id)
+	}
+	if p3.Id != 1 {
+		t.Errorf("%d != 1", p3.Id)
+	}
+}
+
+func TestSetUniqueTogether(t *testing.T) {
+	dbmap := newDbMap()
+	dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds))
+	dbmap.AddTable(UniqueColumns{}).SetUniqueTogether("FirstName", "LastName").SetUniqueTogether("City", "ZipCode")
+	err := dbmap.CreateTablesIfNotExists()
+	if err != nil {
+		panic(err)
+	}
+	defer dropAndClose(dbmap)
+
+	n1 := &UniqueColumns{"Steve", "Jobs", "Cupertino", 95014}
+	err = dbmap.Insert(n1)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// Should fail because of the first unique-together constraint
+	n2 := &UniqueColumns{"Steve", "Jobs", "Sunnyvale", 94085}
+	err = dbmap.Insert(n2)
+	if err == nil {
+		t.Fatal("expected unique constraint violation on (FirstName, LastName)")
+	}
+	// "unique" for Postgres/SQLite, "Duplicate entry" for MySQL
+	errLower := strings.ToLower(err.Error())
+	if !strings.Contains(errLower, "unique") && !strings.Contains(errLower, "duplicate entry") {
+		t.Error(err)
+	}
+
+	// Should also fail because of the second unique-together constraint
+	n3 := &UniqueColumns{"Steve", "Wozniak", "Cupertino", 95014}
+	err = dbmap.Insert(n3)
+	if err == nil {
+		t.Fatal("expected unique constraint violation on (City, ZipCode)")
+	}
+	// "unique" for Postgres/SQLite, "Duplicate entry" for MySQL
+	errLower = strings.ToLower(err.Error())
+	if !strings.Contains(errLower, "unique") && !strings.Contains(errLower, "duplicate entry") {
+		t.Error(err)
+	}
+
+	// This one should finally succeed
+	n4 := &UniqueColumns{"Steve", "Wozniak", "Sunnyvale", 94085}
+	err = dbmap.Insert(n4)
+	if err != nil {
+		t.Error(err)
+	}
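+	// n4 succeeds because neither ("Steve", "Wozniak") nor
+	// ("Sunnyvale", 94085) repeats an earlier row, so no unique-together
+	// constraint fires.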
+} + +func TestPersistentUser(t *testing.T) { + dbmap := newDbMap() + dbmap.Exec("drop table if exists PersistentUser") + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + table := dbmap.AddTable(PersistentUser{}).SetKeys(false, "Key") + table.ColMap("Key").Rename("mykey") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + pu := &PersistentUser{43, "33r", false} + err = dbmap.Insert(pu) + if err != nil { + panic(err) + } + + // prove we can pass a pointer into Get + pu2, err := dbmap.Get(pu, pu.Key) + if err != nil { + panic(err) + } + if !reflect.DeepEqual(pu, pu2) { + t.Errorf("%v!=%v", pu, pu2) + } + + arr, err := dbmap.Select(pu, "select * from PersistentUser") + if err != nil { + panic(err) + } + if !reflect.DeepEqual(pu, arr[0]) { + t.Errorf("%v!=%v", pu, arr[0]) + } + + // prove we can get the results back in a slice + var puArr []*PersistentUser + _, err = dbmap.Select(&puArr, "select * from PersistentUser") + if err != nil { + panic(err) + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu, puArr[0]) { + t.Errorf("%v!=%v", pu, puArr[0]) + } + + // prove we can get the results back in a non-pointer slice + var puValues []PersistentUser + _, err = dbmap.Select(&puValues, "select * from PersistentUser") + if err != nil { + panic(err) + } + if len(puValues) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(*pu, puValues[0]) { + t.Errorf("%v!=%v", *pu, puValues[0]) + } + + // prove we can get the results back in a string slice + var idArr []*string + _, err = dbmap.Select(&idArr, "select Id from PersistentUser") + if err != nil { + panic(err) + } + if len(idArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.Id, *idArr[0]) { + t.Errorf("%v!=%v", pu.Id, *idArr[0]) + } + + // prove we can get the results back in an int slice + var keyArr []*int32 + _, err = dbmap.Select(&keyArr, "select mykey from PersistentUser") + if err != nil { + panic(err) + } + if len(keyArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.Key, *keyArr[0]) { + t.Errorf("%v!=%v", pu.Key, *keyArr[0]) + } + + // prove we can get the results back in a bool slice + var passedArr []*bool + _, err = dbmap.Select(&passedArr, "select PassedTraining from PersistentUser") + if err != nil { + panic(err) + } + if len(passedArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.PassedTraining, *passedArr[0]) { + t.Errorf("%v!=%v", pu.PassedTraining, *passedArr[0]) + } + + // prove we can get the results back in a non-pointer slice + var stringArr []string + _, err = dbmap.Select(&stringArr, "select Id from PersistentUser") + if err != nil { + panic(err) + } + if len(stringArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.Id, stringArr[0]) { + t.Errorf("%v!=%v", pu.Id, stringArr[0]) + } +} + +func TestNamedQueryMap(t *testing.T) { + dbmap := newDbMap() + dbmap.Exec("drop table if exists PersistentUser") + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + table := dbmap.AddTable(PersistentUser{}).SetKeys(false, "Key") + table.ColMap("Key").Rename("mykey") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + pu := &PersistentUser{43, "33r", false} + pu2 := &PersistentUser{500, "abc", false} + err = 
dbmap.Insert(pu, pu2) + if err != nil { + panic(err) + } + + // Test simple case + var puArr []*PersistentUser + _, err = dbmap.Select(&puArr, "select * from PersistentUser where mykey = :Key", map[string]interface{}{ + "Key": 43, + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu, puArr[0]) { + t.Errorf("%v!=%v", pu, puArr[0]) + } + + // Test more specific map value type is ok + puArr = nil + _, err = dbmap.Select(&puArr, "select * from PersistentUser where mykey = :Key", map[string]int{ + "Key": 43, + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + + // Test multiple parameters set. + puArr = nil + _, err = dbmap.Select(&puArr, ` +select * from PersistentUser + where mykey = :Key + and PassedTraining = :PassedTraining + and Id = :Id`, map[string]interface{}{ + "Key": 43, + "PassedTraining": false, + "Id": "33r", + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + + // Test colon within a non-key string + // Test having extra, unused properties in the map. + puArr = nil + _, err = dbmap.Select(&puArr, ` +select * from PersistentUser + where mykey = :Key + and Id != 'abc:def'`, map[string]interface{}{ + "Key": 43, + "PassedTraining": false, + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } +} + +func TestNamedQueryStruct(t *testing.T) { + dbmap := newDbMap() + dbmap.Exec("drop table if exists PersistentUser") + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + table := dbmap.AddTable(PersistentUser{}).SetKeys(false, "Key") + table.ColMap("Key").Rename("mykey") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + pu := &PersistentUser{43, "33r", false} + pu2 := &PersistentUser{500, "abc", false} + err = dbmap.Insert(pu, pu2) + if err != nil { + panic(err) + } + + // Test select self + var puArr []*PersistentUser + _, err = dbmap.Select(&puArr, ` +select * from PersistentUser + where mykey = :Key + and PassedTraining = :PassedTraining + and Id = :Id`, pu) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu, puArr[0]) { + t.Errorf("%v!=%v", pu, puArr[0]) + } +} + +// Ensure that the slices containing SQL results are non-nil when the result set is empty. 
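+// The distinction matters mainly to callers that serialize the result:
+// encoding/json, for example, renders an empty slice as [] but a nil slice
+// as null.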
+func TestReturnsNonNilSlice(t *testing.T) {
+	dbmap := initDbMap()
+	defer dropAndClose(dbmap)
+	noResultsSQL := "select * from invoice_test where id=99999"
+	var r1 []*Invoice
+	_rawselect(dbmap, &r1, noResultsSQL)
+	if r1 == nil {
+		t.Errorf("r1==nil")
+	}
+
+	r2 := _rawselect(dbmap, Invoice{}, noResultsSQL)
+	if r2 == nil {
+		t.Errorf("r2==nil")
+	}
+}
+
+func TestOverrideVersionCol(t *testing.T) {
+	dbmap := newDbMap()
+	t1 := dbmap.AddTable(InvoicePersonView{}).SetKeys(false, "InvoiceId", "PersonId")
+	err := dbmap.CreateTables()
+	if err != nil {
+		panic(err)
+	}
+	defer dropAndClose(dbmap)
+	c1 := t1.SetVersionCol("LegacyVersion")
+	if c1.ColumnName != "LegacyVersion" {
+		t.Errorf("Wrong col returned: %v", c1)
+	}
+
+	ipv := &InvoicePersonView{1, 2, "memo", "fname", 0}
+	_update(dbmap, ipv)
+	if ipv.LegacyVersion != 1 {
+		t.Errorf("LegacyVersion not updated: %d", ipv.LegacyVersion)
+	}
+}
+
+func TestOptimisticLocking(t *testing.T) {
+	dbmap := initDbMap()
+	defer dropAndClose(dbmap)
+
+	p1 := &Person{0, 0, 0, "Bob", "Smith", 0}
+	dbmap.Insert(p1) // Version is now 1
+	if p1.Version != 1 {
+		t.Errorf("Insert didn't incr Version: %d != %d", 1, p1.Version)
+		return
+	}
+	if p1.Id == 0 {
+		t.Errorf("Insert didn't return a generated PK")
+		return
+	}
+
+	obj, err := dbmap.Get(Person{}, p1.Id)
+	if err != nil {
+		panic(err)
+	}
+	p2 := obj.(*Person)
+	p2.LName = "Edwards"
+	dbmap.Update(p2) // Version is now 2
+	if p2.Version != 2 {
+		t.Errorf("Update didn't incr Version: %d != %d", 2, p2.Version)
+	}
+
+	p1.LName = "Howard"
+	count, err := dbmap.Update(p1)
+	if _, ok := err.(OptimisticLockError); !ok {
+		t.Errorf("update - Expected OptimisticLockError, got: %v", err)
+	}
+	if count != -1 {
+		t.Errorf("update - Expected -1 count, got: %d", count)
+	}
+
+	count, err = dbmap.Delete(p1)
+	if _, ok := err.(OptimisticLockError); !ok {
+		t.Errorf("delete - Expected OptimisticLockError, got: %v", err)
+	}
+	if count != -1 {
+		t.Errorf("delete - Expected -1 count, got: %d", count)
+	}
+}
+
+// verify that adding the same table twice returns the original TableMap
+func TestDoubleAddTable(t *testing.T) {
+	dbmap := newDbMap()
+	t1 := dbmap.AddTable(TableWithNull{}).SetKeys(false, "Id")
+	t2 := dbmap.AddTable(TableWithNull{})
+	if t1 != t2 {
+		t.Errorf("%v != %v", t1, t2)
+	}
+}
+
+// what happens if a legacy table has a null value?
+func TestNullValues(t *testing.T) { + dbmap := initDbMapNulls() + defer dropAndClose(dbmap) + + // insert a row directly + _rawexec(dbmap, "insert into TableWithNull values (10, null, "+ + "null, null, null, null)") + + // try to load it + expected := &TableWithNull{Id: 10} + obj := _get(dbmap, TableWithNull{}, 10) + t1 := obj.(*TableWithNull) + if !reflect.DeepEqual(expected, t1) { + t.Errorf("%v != %v", expected, t1) + } + + // update it + t1.Str = sql.NullString{"hi", true} + expected.Str = t1.Str + t1.Int64 = sql.NullInt64{999, true} + expected.Int64 = t1.Int64 + t1.Float64 = sql.NullFloat64{53.33, true} + expected.Float64 = t1.Float64 + t1.Bool = sql.NullBool{true, true} + expected.Bool = t1.Bool + t1.Bytes = []byte{1, 30, 31, 33} + expected.Bytes = t1.Bytes + _update(dbmap, t1) + + obj = _get(dbmap, TableWithNull{}, 10) + t1 = obj.(*TableWithNull) + if t1.Str.String != "hi" { + t.Errorf("%s != hi", t1.Str.String) + } + if !reflect.DeepEqual(expected, t1) { + t.Errorf("%v != %v", expected, t1) + } +} + +func TestColumnProps(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + t1 := dbmap.AddTable(Invoice{}).SetKeys(true, "Id") + t1.ColMap("Created").Rename("date_created") + t1.ColMap("Updated").SetTransient(true) + t1.ColMap("Memo").SetMaxSize(10) + t1.ColMap("PersonId").SetUnique(true) + + err := dbmap.CreateTables() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + + // test transient + inv := &Invoice{0, 0, 1, "my invoice", 0, true} + _insert(dbmap, inv) + obj := _get(dbmap, Invoice{}, inv.Id) + inv = obj.(*Invoice) + if inv.Updated != 0 { + t.Errorf("Saved transient column 'Updated'") + } + + // test max size + inv.Memo = "this memo is too long" + err = dbmap.Insert(inv) + if err == nil { + t.Errorf("max size exceeded, but Insert did not fail.") + } + + // test unique - same person id + inv = &Invoice{0, 0, 1, "my invoice2", 0, false} + err = dbmap.Insert(inv) + if err == nil { + t.Errorf("same PersonId inserted, but Insert did not fail.") + } +} + +func TestRawSelect(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &Person{0, 0, 0, "bob", "smith", 0} + _insert(dbmap, p1) + + inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id, true} + _insert(dbmap, inv1) + + expected := &InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName, 0} + + query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + list := _rawselect(dbmap, InvoicePersonView{}, query) + if len(list) != 1 { + t.Errorf("len(list) != 1: %d", len(list)) + } else if !reflect.DeepEqual(expected, list[0]) { + t.Errorf("%v != %v", expected, list[0]) + } +} + +func TestHooks(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &Person{0, 0, 0, "bob", "smith", 0} + _insert(dbmap, p1) + if p1.Created == 0 || p1.Updated == 0 { + t.Errorf("p1.PreInsert() didn't run: %v", p1) + } else if p1.LName != "postinsert" { + t.Errorf("p1.PostInsert() didn't run: %v", p1) + } + + obj := _get(dbmap, Person{}, p1.Id) + p1 = obj.(*Person) + if p1.LName != "postget" { + t.Errorf("p1.PostGet() didn't run: %v", p1) + } + + _update(dbmap, p1) + if p1.FName != "preupdate" { + t.Errorf("p1.PreUpdate() didn't run: %v", p1) + } else if p1.LName != "postupdate" { + t.Errorf("p1.PostUpdate() didn't run: %v", p1) + } + + var persons []*Person + bindVar := dbmap.Dialect.BindVar(0) + _rawselect(dbmap, &persons, "select * from person_test where id = "+bindVar, 
p1.Id) + if persons[0].LName != "postget" { + t.Errorf("p1.PostGet() didn't run after select: %v", p1) + } + + _del(dbmap, p1) + if p1.FName != "predelete" { + t.Errorf("p1.PreDelete() didn't run: %v", p1) + } else if p1.LName != "postdelete" { + t.Errorf("p1.PostDelete() didn't run: %v", p1) + } + + // Test error case + p2 := &Person{0, 0, 0, "badname", "", 0} + err := dbmap.Insert(p2) + if err == nil { + t.Errorf("p2.PreInsert() didn't return an error") + } +} + +func TestTransaction(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "t1", 0, true} + inv2 := &Invoice{0, 100, 200, "t2", 0, false} + + trans, err := dbmap.Begin() + if err != nil { + panic(err) + } + trans.Insert(inv1, inv2) + err = trans.Commit() + if err != nil { + panic(err) + } + + obj, err := dbmap.Get(Invoice{}, inv1.Id) + if err != nil { + panic(err) + } + if !reflect.DeepEqual(inv1, obj) { + t.Errorf("%v != %v", inv1, obj) + } + obj, err = dbmap.Get(Invoice{}, inv2.Id) + if err != nil { + panic(err) + } + if !reflect.DeepEqual(inv2, obj) { + t.Errorf("%v != %v", inv2, obj) + } +} + +func TestSavepoint(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "unpaid", 0, false} + + trans, err := dbmap.Begin() + if err != nil { + panic(err) + } + trans.Insert(inv1) + + var checkMemo = func(want string) { + memo, err := trans.SelectStr("select memo from invoice_test") + if err != nil { + panic(err) + } + if memo != want { + t.Errorf("%q != %q", want, memo) + } + } + checkMemo("unpaid") + + err = trans.Savepoint("foo") + if err != nil { + panic(err) + } + checkMemo("unpaid") + + inv1.Memo = "paid" + _, err = trans.Update(inv1) + if err != nil { + panic(err) + } + checkMemo("paid") + + err = trans.RollbackToSavepoint("foo") + if err != nil { + panic(err) + } + checkMemo("unpaid") + + err = trans.Rollback() + if err != nil { + panic(err) + } +} + +func TestMultiple(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "a", 0, false} + inv2 := &Invoice{0, 100, 200, "b", 0, true} + _insert(dbmap, inv1, inv2) + + inv1.Memo = "c" + inv2.Memo = "d" + _update(dbmap, inv1, inv2) + + count := _del(dbmap, inv1, inv2) + if count != 2 { + t.Errorf("%d != 2", count) + } +} + +func TestCrud(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv := &Invoice{0, 100, 200, "first order", 0, true} + testCrudInternal(t, dbmap, inv) + + invtag := &InvoiceTag{0, 300, 400, "some order", 33, false} + testCrudInternal(t, dbmap, invtag) + + foo := &AliasTransientField{BarStr: "some bar"} + testCrudInternal(t, dbmap, foo) +} + +func testCrudInternal(t *testing.T, dbmap *DbMap, val testable) { + table, _, err := dbmap.tableForPointer(val, false) + if err != nil { + t.Errorf("couldn't call TableFor: val=%v err=%v", val, err) + } + + _, err = dbmap.Exec("delete from " + table.TableName) + if err != nil { + t.Errorf("couldn't delete rows from: val=%v err=%v", val, err) + } + + // INSERT row + _insert(dbmap, val) + if val.GetId() == 0 { + t.Errorf("val.GetId() was not set on INSERT") + return + } + + // SELECT row + val2 := _get(dbmap, val, val.GetId()) + if !reflect.DeepEqual(val, val2) { + t.Errorf("%v != %v", val, val2) + } + + // UPDATE row and SELECT + val.Rand() + count := _update(dbmap, val) + if count != 1 { + t.Errorf("update 1 != %d", count) + } + val2 = _get(dbmap, val, val.GetId()) + if !reflect.DeepEqual(val, val2) { + t.Errorf("%v != %v", val, val2) + } + + // Select * + rows, err 
:= dbmap.Select(val, "select * from "+table.TableName) + if err != nil { + t.Errorf("couldn't select * from %s err=%v", table.TableName, err) + } else if len(rows) != 1 { + t.Errorf("unexpected row count in %s: %d", table.TableName, len(rows)) + } else if !reflect.DeepEqual(val, rows[0]) { + t.Errorf("select * result: %v != %v", val, rows[0]) + } + + // DELETE row + deleted := _del(dbmap, val) + if deleted != 1 { + t.Errorf("Did not delete row with Id: %d", val.GetId()) + return + } + + // VERIFY deleted + val2 = _get(dbmap, val, val.GetId()) + if val2 != nil { + t.Errorf("Found invoice with id: %d after Delete()", val.GetId()) + } +} + +func TestWithIgnoredColumn(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + ic := &WithIgnoredColumn{-1, 0, 1} + _insert(dbmap, ic) + expected := &WithIgnoredColumn{0, 1, 1} + ic2 := _get(dbmap, WithIgnoredColumn{}, ic.Id).(*WithIgnoredColumn) + + if !reflect.DeepEqual(expected, ic2) { + t.Errorf("%v != %v", expected, ic2) + } + if _del(dbmap, ic) != 1 { + t.Errorf("Did not delete row with Id: %d", ic.Id) + return + } + if _get(dbmap, WithIgnoredColumn{}, ic.Id) != nil { + t.Errorf("Found id: %d after Delete()", ic.Id) + } +} + +func TestTypeConversionExample(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p := Person{FName: "Bob", LName: "Smith"} + tc := &TypeConversionExample{-1, p, CustomStringType("hi")} + _insert(dbmap, tc) + + expected := &TypeConversionExample{1, p, CustomStringType("hi")} + tc2 := _get(dbmap, TypeConversionExample{}, tc.Id).(*TypeConversionExample) + if !reflect.DeepEqual(expected, tc2) { + t.Errorf("tc2 %v != %v", expected, tc2) + } + + tc2.Name = CustomStringType("hi2") + tc2.PersonJSON = Person{FName: "Jane", LName: "Doe"} + _update(dbmap, tc2) + + expected = &TypeConversionExample{1, tc2.PersonJSON, CustomStringType("hi2")} + tc3 := _get(dbmap, TypeConversionExample{}, tc.Id).(*TypeConversionExample) + if !reflect.DeepEqual(expected, tc3) { + t.Errorf("tc3 %v != %v", expected, tc3) + } + + if _del(dbmap, tc) != 1 { + t.Errorf("Did not delete row with Id: %d", tc.Id) + } + +} + +func TestWithEmbeddedStruct(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}} + _insert(dbmap, es) + expected := &WithEmbeddedStruct{1, Names{FirstName: "Alice", LastName: "Smith"}} + es2 := _get(dbmap, WithEmbeddedStruct{}, es.Id).(*WithEmbeddedStruct) + if !reflect.DeepEqual(expected, es2) { + t.Errorf("%v != %v", expected, es2) + } + + es2.FirstName = "Bob" + expected.FirstName = "Bob" + _update(dbmap, es2) + es2 = _get(dbmap, WithEmbeddedStruct{}, es.Id).(*WithEmbeddedStruct) + if !reflect.DeepEqual(expected, es2) { + t.Errorf("%v != %v", expected, es2) + } + + ess := _rawselect(dbmap, WithEmbeddedStruct{}, "select * from embedded_struct_test") + if !reflect.DeepEqual(es2, ess[0]) { + t.Errorf("%v != %v", es2, ess[0]) + } +} + +func TestWithEmbeddedStructBeforeAutoincr(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + esba := &WithEmbeddedStructBeforeAutoincrField{Names: Names{FirstName: "Alice", LastName: "Smith"}} + _insert(dbmap, esba) + var expectedAutoincrId int64 = 1 + if esba.Id != expectedAutoincrId { + t.Errorf("%d != %d", expectedAutoincrId, esba.Id) + } +} + +func TestWithEmbeddedAutoincr(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + esa := &WithEmbeddedAutoincr{ + WithEmbeddedStruct: WithEmbeddedStruct{Names: Names{FirstName: "Alice", LastName: 
"Smith"}}, + MiddleName: "Rose", + } + _insert(dbmap, esa) + var expectedAutoincrId int64 = 1 + if esa.Id != expectedAutoincrId { + t.Errorf("%d != %d", expectedAutoincrId, esa.Id) + } +} + +func TestSelectVal(t *testing.T) { + dbmap := initDbMapNulls() + defer dropAndClose(dbmap) + + bindVar := dbmap.Dialect.BindVar(0) + + t1 := TableWithNull{Str: sql.NullString{"abc", true}, + Int64: sql.NullInt64{78, true}, + Float64: sql.NullFloat64{32.2, true}, + Bool: sql.NullBool{true, true}, + Bytes: []byte("hi")} + _insert(dbmap, &t1) + + // SelectInt + i64 := selectInt(dbmap, "select Int64 from TableWithNull where Str='abc'") + if i64 != 78 { + t.Errorf("int64 %d != 78", i64) + } + i64 = selectInt(dbmap, "select count(*) from TableWithNull") + if i64 != 1 { + t.Errorf("int64 count %d != 1", i64) + } + i64 = selectInt(dbmap, "select count(*) from TableWithNull where Str="+bindVar, "asdfasdf") + if i64 != 0 { + t.Errorf("int64 no rows %d != 0", i64) + } + + // SelectNullInt + n := selectNullInt(dbmap, "select Int64 from TableWithNull where Str='notfound'") + if !reflect.DeepEqual(n, sql.NullInt64{0, false}) { + t.Errorf("nullint %v != 0,false", n) + } + + n = selectNullInt(dbmap, "select Int64 from TableWithNull where Str='abc'") + if !reflect.DeepEqual(n, sql.NullInt64{78, true}) { + t.Errorf("nullint %v != 78, true", n) + } + + // SelectFloat + f64 := selectFloat(dbmap, "select Float64 from TableWithNull where Str='abc'") + if f64 != 32.2 { + t.Errorf("float64 %d != 32.2", f64) + } + f64 = selectFloat(dbmap, "select min(Float64) from TableWithNull") + if f64 != 32.2 { + t.Errorf("float64 min %d != 32.2", f64) + } + f64 = selectFloat(dbmap, "select count(*) from TableWithNull where Str="+bindVar, "asdfasdf") + if f64 != 0 { + t.Errorf("float64 no rows %d != 0", f64) + } + + // SelectNullFloat + nf := selectNullFloat(dbmap, "select Float64 from TableWithNull where Str='notfound'") + if !reflect.DeepEqual(nf, sql.NullFloat64{0, false}) { + t.Errorf("nullfloat %v != 0,false", nf) + } + + nf = selectNullFloat(dbmap, "select Float64 from TableWithNull where Str='abc'") + if !reflect.DeepEqual(nf, sql.NullFloat64{32.2, true}) { + t.Errorf("nullfloat %v != 32.2, true", nf) + } + + // SelectStr + s := selectStr(dbmap, "select Str from TableWithNull where Int64="+bindVar, 78) + if s != "abc" { + t.Errorf("s %s != abc", s) + } + s = selectStr(dbmap, "select Str from TableWithNull where Str='asdfasdf'") + if s != "" { + t.Errorf("s no rows %s != ''", s) + } + + // SelectNullStr + ns := selectNullStr(dbmap, "select Str from TableWithNull where Int64="+bindVar, 78) + if !reflect.DeepEqual(ns, sql.NullString{"abc", true}) { + t.Errorf("nullstr %v != abc,true", ns) + } + ns = selectNullStr(dbmap, "select Str from TableWithNull where Str='asdfasdf'") + if !reflect.DeepEqual(ns, sql.NullString{"", false}) { + t.Errorf("nullstr no rows %v != '',false", ns) + } + + // SelectInt/Str with named parameters + i64 = selectInt(dbmap, "select Int64 from TableWithNull where Str=:abc", map[string]string{"abc": "abc"}) + if i64 != 78 { + t.Errorf("int64 %d != 78", i64) + } + ns = selectNullStr(dbmap, "select Str from TableWithNull where Int64=:num", map[string]int{"num": 78}) + if !reflect.DeepEqual(ns, sql.NullString{"abc", true}) { + t.Errorf("nullstr %v != abc,true", ns) + } +} + +func TestVersionMultipleRows(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + persons := []*Person{ + &Person{0, 0, 0, "Bob", "Smith", 0}, + &Person{0, 0, 0, "Jane", "Smith", 0}, + &Person{0, 0, 0, "Mike", "Smith", 0}, + } + 
+ _insert(dbmap, persons[0], persons[1], persons[2]) + + for x, p := range persons { + if p.Version != 1 { + t.Errorf("person[%d].Version != 1: %d", x, p.Version) + } + } +} + +func TestWithStringPk(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTableWithName(WithStringPk{}, "string_pk_test").SetKeys(true, "Id") + _, err := dbmap.Exec("create table string_pk_test (Id varchar(255), Name varchar(255));") + if err != nil { + t.Errorf("couldn't create string_pk_test: %v", err) + } + defer dropAndClose(dbmap) + + row := &WithStringPk{"1", "foo"} + err = dbmap.Insert(row) + if err == nil { + t.Errorf("Expected error when inserting into table w/non Int PK and autoincr set true") + } +} + +// TestSqlExecutorInterfaceSelects ensures that all DbMap methods starting with Select... +// are also exposed in the SqlExecutor interface. Select... functions can always +// run on Pre/Post hooks. +func TestSqlExecutorInterfaceSelects(t *testing.T) { + dbMapType := reflect.TypeOf(&DbMap{}) + sqlExecutorType := reflect.TypeOf((*SqlExecutor)(nil)).Elem() + numDbMapMethods := dbMapType.NumMethod() + for i := 0; i < numDbMapMethods; i += 1 { + dbMapMethod := dbMapType.Method(i) + if !strings.HasPrefix(dbMapMethod.Name, "Select") { + continue + } + if _, found := sqlExecutorType.MethodByName(dbMapMethod.Name); !found { + t.Errorf("Method %s is defined on DbMap but not implemented in SqlExecutor", + dbMapMethod.Name) + } + } +} + +type WithTime struct { + Id int64 + Time time.Time +} + +type Times struct { + One time.Time + Two time.Time +} + +type EmbeddedTime struct { + Id string + Times +} + +func parseTimeOrPanic(format, date string) time.Time { + t1, err := time.Parse(format, date) + if err != nil { + panic(err) + } + return t1 +} + +// TODO: re-enable next two tests when this is merged: +// https://github.com/ziutek/mymysql/pull/77 +// +// This test currently fails w/MySQL b/c tz info is lost +func testWithTime(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + t1 := parseTimeOrPanic("2006-01-02 15:04:05 -0700 MST", + "2013-08-09 21:30:43 +0800 CST") + w1 := WithTime{1, t1} + _insert(dbmap, &w1) + + obj := _get(dbmap, WithTime{}, w1.Id) + w2 := obj.(*WithTime) + if w1.Time.UnixNano() != w2.Time.UnixNano() { + t.Errorf("%v != %v", w1, w2) + } +} + +// See: https://github.com/coopernurse/gorp/issues/86 +func testEmbeddedTime(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTable(EmbeddedTime{}).SetKeys(false, "Id") + defer dropAndClose(dbmap) + err := dbmap.CreateTables() + if err != nil { + t.Fatal(err) + } + + time1 := parseTimeOrPanic("2006-01-02 15:04:05", "2013-08-09 21:30:43") + + t1 := &EmbeddedTime{Id: "abc", Times: Times{One: time1, Two: time1.Add(10 * time.Second)}} + _insert(dbmap, t1) + + x := _get(dbmap, EmbeddedTime{}, t1.Id) + t2, _ := x.(*EmbeddedTime) + if t1.One.UnixNano() != t2.One.UnixNano() || t1.Two.UnixNano() != t2.Two.UnixNano() { + t.Errorf("%v != %v", t1, t2) + } +} + +func TestWithTimeSelect(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + halfhourago := time.Now().UTC().Add(-30 * time.Minute) + + w1 := WithTime{1, halfhourago.Add(time.Minute * -1)} + w2 := WithTime{2, halfhourago.Add(time.Second)} + _insert(dbmap, &w1, &w2) + + var caseIds []int64 + _, err := dbmap.Select(&caseIds, "SELECT id FROM time_test WHERE Time < "+dbmap.Dialect.BindVar(0), halfhourago) + + if err != nil { + t.Error(err) + } 
+	if len(caseIds) != 1 {
+		t.Errorf("%d != 1", len(caseIds))
+	}
+	if caseIds[0] != w1.Id {
+		t.Errorf("%d != %d", caseIds[0], w1.Id)
+	}
+}
+
+func TestInvoicePersonView(t *testing.T) {
+	dbmap := initDbMap()
+	defer dropAndClose(dbmap)
+
+	// Create some rows
+	p1 := &Person{0, 0, 0, "bob", "smith", 0}
+	dbmap.Insert(p1)
+
+	// notice how we can wire up p1.Id to the invoice easily
+	inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id, false}
+	dbmap.Insert(inv1)
+
+	// Run your query
+	query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " +
+		"from invoice_test i, person_test p " +
+		"where i.PersonId = p.Id"
+
+	// pass a slice of pointers to Select()
+	// this avoids the need to type assert after the query is run
+	var list []*InvoicePersonView
+	_, err := dbmap.Select(&list, query)
+	if err != nil {
+		panic(err)
+	}
+
+	// this should test true
+	expected := &InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName, 0}
+	if !reflect.DeepEqual(list[0], expected) {
+		t.Errorf("%v != %v", list[0], expected)
+	}
+}
+
+func TestQuoteTableNames(t *testing.T) {
+	dbmap := initDbMap()
+	defer dropAndClose(dbmap)
+
+	quotedTableName := dbmap.Dialect.QuoteField("person_test")
+
+	// Use a buffer to hold the log to check generated queries
+	logBuffer := &bytes.Buffer{}
+	dbmap.TraceOn("", log.New(logBuffer, "gorptest:", log.Lmicroseconds))
+
+	// Create some rows
+	p1 := &Person{0, 0, 0, "bob", "smith", 0}
+	errorTemplate := "Expected quoted table name %v in query but didn't find it"
+
+	// Check if Insert quotes the table name
+	err := dbmap.Insert(p1)
+	if err != nil {
+		t.Error(err)
+	}
+	if !bytes.Contains(logBuffer.Bytes(), []byte(quotedTableName)) {
+		t.Errorf(errorTemplate, quotedTableName)
+	}
+	logBuffer.Reset()
+
+	// Check if Get quotes the table name
+	dbmap.Get(Person{}, p1.Id)
+	if !bytes.Contains(logBuffer.Bytes(), []byte(quotedTableName)) {
+		t.Errorf(errorTemplate, quotedTableName)
+	}
+	logBuffer.Reset()
+}
+
+func TestSelectTooManyCols(t *testing.T) {
+	dbmap := initDbMap()
+	defer dropAndClose(dbmap)
+
+	p1 := &Person{0, 0, 0, "bob", "smith", 0}
+	p2 := &Person{0, 0, 0, "jane", "doe", 0}
+	_insert(dbmap, p1)
+	_insert(dbmap, p2)
+
+	obj := _get(dbmap, Person{}, p1.Id)
+	p1 = obj.(*Person)
+	obj = _get(dbmap, Person{}, p2.Id)
+	p2 = obj.(*Person)
+
+	params := map[string]interface{}{
+		"Id": p1.Id,
+	}
+
+	var p3 FNameOnly
+	err = dbmap.SelectOne(&p3, "select * from person_test where Id=:Id", params)
+	if err != nil {
+		if !NonFatalError(err) {
+			t.Error(err)
+		}
+	} else {
+		t.Errorf("Non-fatal error expected")
+	}
+
+	if p1.FName != p3.FName {
+		t.Errorf("%v != %v", p1.FName, p3.FName)
+	}
+
+	var pSlice []FNameOnly
+	_, err = dbmap.Select(&pSlice, "select * from person_test order by fname asc")
+	if err != nil {
+		if !NonFatalError(err) {
+			t.Error(err)
+		}
+	} else {
+		t.Errorf("Non-fatal error expected")
+	}
+
+	if p1.FName != pSlice[0].FName {
+		t.Errorf("%v != %v", p1.FName, pSlice[0].FName)
+	}
+	if p2.FName != pSlice[1].FName {
+		t.Errorf("%v != %v", p2.FName, pSlice[1].FName)
+	}
+}
+
+func TestSelectSingleVal(t *testing.T) {
+	dbmap := initDbMap()
+	defer dropAndClose(dbmap)
+
+	p1 := &Person{0, 0, 0, "bob", "smith", 0}
+	_insert(dbmap, p1)
+
+	obj := _get(dbmap, Person{}, p1.Id)
+	p1 = obj.(*Person)
+
+	params := map[string]interface{}{
+		"Id": p1.Id,
+	}
+
+	var p2 Person
+	err := dbmap.SelectOne(&p2, "select * from person_test where Id=:Id", params)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if !reflect.DeepEqual(p1, &p2) {
+		t.Errorf("%v != %v", p1, &p2)
+	}
+
+	// verify SelectOne allows non-struct holders
+	var s string
+	err = dbmap.SelectOne(&s, "select FName from person_test where Id=:Id", params)
+	if err != nil {
+		t.Error(err)
+	}
+	if s != "bob" {
+		t.Error("Expected bob but got: " + s)
+	}
+
+	// verify SelectOne requires pointer receiver
+	err = dbmap.SelectOne(s, "select FName from person_test where Id=:Id", params)
+	if err == nil {
+		t.Error("SelectOne should have returned error for non-pointer holder")
+	}
+
+	// verify SelectOne works with uninitialized pointers
+	var p3 *Person
+	err = dbmap.SelectOne(&p3, "select * from person_test where Id=:Id", params)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if !reflect.DeepEqual(p1, p3) {
+		t.Errorf("%v != %v", p1, p3)
+	}
+
+	// verify that the receiver is still nil if nothing was found
+	var p4 *Person
+	dbmap.SelectOne(&p4, "select * from person_test where 2<1 AND Id=:Id", params)
+	if p4 != nil {
+		t.Error("SelectOne should not have changed a nil receiver when no rows were found")
+	}
+
+	// verify that the error is set to sql.ErrNoRows if not found
+	err = dbmap.SelectOne(&p2, "select * from person_test where Id=:Id", map[string]interface{}{
+		"Id": -2222,
+	})
+	if err == nil || err != sql.ErrNoRows {
+		t.Error("SelectOne should have returned an sql.ErrNoRows")
+	}
+
+	_insert(dbmap, &Person{0, 0, 0, "bob", "smith", 0})
+	err = dbmap.SelectOne(&p2, "select * from person_test where Fname='bob'")
+	if err == nil {
+		t.Error("Expected error when two rows found")
+	}
+
+	// tests for #150
+	var tInt int64
+	var tStr string
+	var tBool bool
+	var tFloat float64
+	primVals := []interface{}{tInt, tStr, tBool, tFloat}
+	for _, prim := range primVals {
+		err = dbmap.SelectOne(&prim, "select * from person_test where Id=-123")
+		if err == nil || err != sql.ErrNoRows {
+			t.Error("primVals: SelectOne should have returned sql.ErrNoRows")
+		}
+	}
+}
+
+func TestSelectAlias(t *testing.T) {
+	dbmap := initDbMap()
+	defer dropAndClose(dbmap)
+
+	p1 := &IdCreatedExternal{IdCreated: IdCreated{Id: 1, Created: 3}, External: 2}
+
+	// Insert using embedded IdCreated, which reflects the structure of the table
+	_insert(dbmap, &p1.IdCreated)
+
+	// Select into IdCreatedExternal type, which includes some fields not present
+	// in id_created_test
+	var p2 IdCreatedExternal
+	err := dbmap.SelectOne(&p2, "select * from id_created_test where Id=1")
+	if err != nil {
+		t.Error(err)
+	}
+	if p2.Id != 1 || p2.Created != 3 || p2.External != 0 {
+		t.Error("Expected ignored field defaults to not set")
+	}
+
+	// Prove that we can supply an aliased value in the select, and that it will
+	// automatically map to IdCreatedExternal.External
+	err = dbmap.SelectOne(&p2, "SELECT *, 1 AS external FROM id_created_test")
+	if err != nil {
+		t.Error(err)
+	}
+	if p2.External != 1 {
+		t.Error("Expected select as can map to exported field.")
+	}
+
+	var rows *sql.Rows
+	var cols []string
+	rows, err = dbmap.Db.Query("SELECT * FROM id_created_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	cols, err = rows.Columns()
+	if err != nil || len(cols) != 2 {
+		t.Error("Expected ignored column not created")
+	}
+}
+
+func TestMysqlPanicIfDialectNotInitialized(t *testing.T) {
+	_, driver := dialectAndDriver()
+	// this test only applies to MySQL
+	if os.Getenv("GORP_TEST_DIALECT") != "mysql" {
+		return
+	}
+
+	// The expected behaviour is to catch a panic.
+ // Here is the deferred function which will check if a panic has indeed occurred : + defer func() { + r := recover() + if r == nil { + t.Error("db.CreateTables() should panic if db is initialized with an incorrect MySQLDialect") + } + }() + + // invalid MySQLDialect : does not contain Engine or Encoding specification + dialect := MySQLDialect{} + db := &DbMap{Db: connect(driver), Dialect: dialect} + db.AddTableWithName(Invoice{}, "invoice") + // the following call should panic : + db.CreateTables() +} + +func TestSingleColumnKeyDbReturnsZeroRowsUpdatedOnPKChange(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + dbmap.AddTableWithName(SingleColumnTable{}, "single_column_table").SetKeys(false, "SomeId") + err := dbmap.DropTablesIfExists() + if err != nil { + t.Error("Drop tables failed") + } + err = dbmap.CreateTablesIfNotExists() + if err != nil { + t.Error("Create tables failed") + } + err = dbmap.TruncateTables() + if err != nil { + t.Error("Truncate tables failed") + } + + sct := SingleColumnTable{ + SomeId: "A Unique Id String", + } + + count, err := dbmap.Update(&sct) + if err != nil { + t.Error(err) + } + if count != 0 { + t.Errorf("Expected 0 updated rows, got %d", count) + } + +} + +func TestPrepare(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "prepare-foo", 0, false} + inv2 := &Invoice{0, 100, 200, "prepare-bar", 0, false} + _insert(dbmap, inv1, inv2) + + bindVar0 := dbmap.Dialect.BindVar(0) + bindVar1 := dbmap.Dialect.BindVar(1) + stmt, err := dbmap.Prepare(fmt.Sprintf("UPDATE invoice_test SET Memo=%s WHERE Id=%s", bindVar0, bindVar1)) + if err != nil { + t.Error(err) + } + defer stmt.Close() + _, err = stmt.Exec("prepare-baz", inv1.Id) + if err != nil { + t.Error(err) + } + err = dbmap.SelectOne(inv1, "SELECT * from invoice_test WHERE Memo='prepare-baz'") + if err != nil { + t.Error(err) + } + + trans, err := dbmap.Begin() + if err != nil { + t.Error(err) + } + transStmt, err := trans.Prepare(fmt.Sprintf("UPDATE invoice_test SET IsPaid=%s WHERE Id=%s", bindVar0, bindVar1)) + if err != nil { + t.Error(err) + } + defer transStmt.Close() + _, err = transStmt.Exec(true, inv2.Id) + if err != nil { + t.Error(err) + } + err = dbmap.SelectOne(inv2, fmt.Sprintf("SELECT * from invoice_test WHERE IsPaid=%s", bindVar0), true) + if err == nil || err != sql.ErrNoRows { + t.Error("SelectOne should have returned an sql.ErrNoRows") + } + err = trans.SelectOne(inv2, fmt.Sprintf("SELECT * from invoice_test WHERE IsPaid=%s", bindVar0), true) + if err != nil { + t.Error(err) + } + err = trans.Commit() + if err != nil { + t.Error(err) + } + err = dbmap.SelectOne(inv2, fmt.Sprintf("SELECT * from invoice_test WHERE IsPaid=%s", bindVar0), true) + if err != nil { + t.Error(err) + } +} + +func BenchmarkNativeCrud(b *testing.B) { + b.StopTimer() + dbmap := initDbMapBench() + defer dropAndClose(dbmap) + b.StartTimer() + + insert := "insert into invoice_test (Created, Updated, Memo, PersonId) values (?, ?, ?, ?)" + sel := "select Id, Created, Updated, Memo, PersonId from invoice_test where Id=?" + update := "update invoice_test set Created=?, Updated=?, Memo=?, PersonId=? where Id=?" + delete := "delete from invoice_test where Id=?" 
+
+	inv := &Invoice{0, 100, 200, "my memo", 0, false}
+
+	for i := 0; i < b.N; i++ {
+		res, err := dbmap.Db.Exec(insert, inv.Created, inv.Updated,
+			inv.Memo, inv.PersonId)
+		if err != nil {
+			panic(err)
+		}
+
+		newid, err := res.LastInsertId()
+		if err != nil {
+			panic(err)
+		}
+		inv.Id = newid
+
+		row := dbmap.Db.QueryRow(sel, inv.Id)
+		err = row.Scan(&inv.Id, &inv.Created, &inv.Updated, &inv.Memo,
+			&inv.PersonId)
+		if err != nil {
+			panic(err)
+		}
+
+		inv.Created = 1000
+		inv.Updated = 2000
+		inv.Memo = "my memo 2"
+		inv.PersonId = 3000
+
+		_, err = dbmap.Db.Exec(update, inv.Created, inv.Updated, inv.Memo,
+			inv.PersonId, inv.Id)
+		if err != nil {
+			panic(err)
+		}
+
+		_, err = dbmap.Db.Exec(delete, inv.Id)
+		if err != nil {
+			panic(err)
+		}
+	}
+
+}
+
+func BenchmarkGorpCrud(b *testing.B) {
+	b.StopTimer()
+	dbmap := initDbMapBench()
+	defer dropAndClose(dbmap)
+	b.StartTimer()
+
+	inv := &Invoice{0, 100, 200, "my memo", 0, true}
+	for i := 0; i < b.N; i++ {
+		err := dbmap.Insert(inv)
+		if err != nil {
+			panic(err)
+		}
+
+		obj, err := dbmap.Get(Invoice{}, inv.Id)
+		if err != nil {
+			panic(err)
+		}
+
+		inv2, ok := obj.(*Invoice)
+		if !ok {
+			panic(fmt.Sprintf("expected *Invoice, got: %v", obj))
+		}
+
+		inv2.Created = 1000
+		inv2.Updated = 2000
+		inv2.Memo = "my memo 2"
+		inv2.PersonId = 3000
+		_, err = dbmap.Update(inv2)
+		if err != nil {
+			panic(err)
+		}
+
+		_, err = dbmap.Delete(inv2)
+		if err != nil {
+			panic(err)
+		}
+
+	}
+}
+
+func initDbMapBench() *DbMap {
+	dbmap := newDbMap()
+	dbmap.Db.Exec("drop table if exists invoice_test")
+	dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id")
+	err := dbmap.CreateTables()
+	if err != nil {
+		panic(err)
+	}
+	return dbmap
+}
+
+func initDbMap() *DbMap {
+	dbmap := newDbMap()
+	dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(InvoiceTag{}, "invoice_tag_test").SetKeys(true, "myid")
+	dbmap.AddTableWithName(AliasTransientField{}, "alias_trans_field_test").SetKeys(true, "id")
+	dbmap.AddTableWithName(OverriddenInvoice{}, "invoice_override_test").SetKeys(false, "Id")
+	dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(WithIgnoredColumn{}, "ignored_column_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(IdCreated{}, "id_created_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(TypeConversionExample{}, "type_conv_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(WithEmbeddedStruct{}, "embedded_struct_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(WithEmbeddedStructBeforeAutoincrField{}, "embedded_struct_before_autoincr_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(WithEmbeddedAutoincr{}, "embedded_autoincr_test").SetKeys(true, "Id")
+	dbmap.AddTableWithName(WithTime{}, "time_test").SetKeys(true, "Id")
+	dbmap.TypeConverter = testTypeConverter{}
+	err := dbmap.DropTablesIfExists()
+	if err != nil {
+		panic(err)
+	}
+	err = dbmap.CreateTables()
+	if err != nil {
+		panic(err)
+	}
+
+	// See #146 and TestSelectAlias - this type is mapped to the same
+	// table as IdCreated, but includes an extra field that isn't in the table
+	dbmap.AddTableWithName(IdCreatedExternal{}, "id_created_test").SetKeys(true, "Id")
+
+	return dbmap
+}
+
+func initDbMapNulls() *DbMap {
+	dbmap := newDbMap()
+	dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds))
+	dbmap.AddTable(TableWithNull{}).SetKeys(false, "Id")
+	err := dbmap.CreateTables()
+	if err != nil {
+		panic(err)
+	}
+	return dbmap
+}
+
+func newDbMap() *DbMap {
+	dialect, driver := dialectAndDriver()
+	dbmap := &DbMap{Db: connect(driver), Dialect: dialect}
+	dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds))
+	return dbmap
+}
+
+func dropAndClose(dbmap *DbMap) {
+	dbmap.DropTablesIfExists()
+	dbmap.Db.Close()
+}
+
+func connect(driver string) *sql.DB {
+	dsn := os.Getenv("GORP_TEST_DSN")
+	if dsn == "" {
+		panic("GORP_TEST_DSN env variable is not set. Please see README.md")
+	}
+
+	db, err := sql.Open(driver, dsn)
+	if err != nil {
+		panic("Error connecting to db: " + err.Error())
+	}
+	return db
+}
+
+func dialectAndDriver() (Dialect, string) {
+	switch os.Getenv("GORP_TEST_DIALECT") {
+	case "mysql":
+		return MySQLDialect{"InnoDB", "UTF8"}, "mymysql"
+	case "gomysql":
+		return MySQLDialect{"InnoDB", "UTF8"}, "mysql"
+	case "postgres":
+		return PostgresDialect{}, "postgres"
+	case "sqlite":
+		return SqliteDialect{}, "sqlite3"
+	}
+	panic("GORP_TEST_DIALECT env variable is not set or is invalid. Please see README.md")
+}
+
+func _insert(dbmap *DbMap, list ...interface{}) {
+	err := dbmap.Insert(list...)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func _update(dbmap *DbMap, list ...interface{}) int64 {
+	count, err := dbmap.Update(list...)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+func _del(dbmap *DbMap, list ...interface{}) int64 {
+	count, err := dbmap.Delete(list...)
+	if err != nil {
+		panic(err)
+	}
+
+	return count
+}
+
+func _get(dbmap *DbMap, i interface{}, keys ...interface{}) interface{} {
+	obj, err := dbmap.Get(i, keys...)
+	if err != nil {
+		panic(err)
+	}
+
+	return obj
+}
+
+func selectInt(dbmap *DbMap, query string, args ...interface{}) int64 {
+	i64, err := SelectInt(dbmap, query, args...)
+	if err != nil {
+		panic(err)
+	}
+
+	return i64
+}
+
+func selectNullInt(dbmap *DbMap, query string, args ...interface{}) sql.NullInt64 {
+	i64, err := SelectNullInt(dbmap, query, args...)
+	if err != nil {
+		panic(err)
+	}
+
+	return i64
+}
+
+func selectFloat(dbmap *DbMap, query string, args ...interface{}) float64 {
+	f64, err := SelectFloat(dbmap, query, args...)
+	if err != nil {
+		panic(err)
+	}
+
+	return f64
+}
+
+func selectNullFloat(dbmap *DbMap, query string, args ...interface{}) sql.NullFloat64 {
+	f64, err := SelectNullFloat(dbmap, query, args...)
+	if err != nil {
+		panic(err)
+	}
+
+	return f64
+}
+
+func selectStr(dbmap *DbMap, query string, args ...interface{}) string {
+	s, err := SelectStr(dbmap, query, args...)
+	if err != nil {
+		panic(err)
+	}
+
+	return s
+}
+
+func selectNullStr(dbmap *DbMap, query string, args ...interface{}) sql.NullString {
+	s, err := SelectNullStr(dbmap, query, args...)
+	if err != nil {
+		panic(err)
+	}
+
+	return s
+}
+
+func _rawexec(dbmap *DbMap, query string, args ...interface{}) sql.Result {
+	res, err := dbmap.Exec(query, args...)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+func _rawselect(dbmap *DbMap, i interface{}, query string, args ...interface{}) []interface{} {
+	list, err := dbmap.Select(i, query, args...)
+	if err != nil {
+		panic(err)
+	}
+	return list
+}
diff --git a/Godeps/_workspace/src/gopkg.in/gorp.v1/test_all.sh b/Godeps/_workspace/src/gopkg.in/gorp.v1/test_all.sh
new file mode 100644
index 00000000..f870b39a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/gorp.v1/test_all.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# on macs, you may need to:
+# export GOBUILDFLAG=-ldflags -linkmode=external
+
+set -e
+
+export GORP_TEST_DSN=gorptest/gorptest/gorptest
+export GORP_TEST_DIALECT=mysql
+go test $GOBUILDFLAG .
+
+export GORP_TEST_DSN=gorptest:gorptest@/gorptest
+export GORP_TEST_DIALECT=gomysql
+go test $GOBUILDFLAG .
+
+export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable"
+export GORP_TEST_DIALECT=postgres
+go test $GOBUILDFLAG .
+
+export GORP_TEST_DSN=/tmp/gorptest.bin
+export GORP_TEST_DIALECT=sqlite
+go test $GOBUILDFLAG .

From c16e3b5a10d2cf738572e49a0426c619326a395d Mon Sep 17 00:00:00 2001
From: Bobby Rullo
Date: Wed, 19 Aug 2015 15:40:36 -0700
Subject: [PATCH 2/4] db: add DB migration code and scripts.

---
 build                                       |  15 +
 db/client.go                                |   2 +-
 db/migrate.go                               |  59 +++
 db/migrate_test.go                          |  42 +++
 db/migrations/0001_initial_migration.sql    |  47 +++
 db/migrations/0002_dex_admin.sql            |   2 +
 db/migrations/0003_user_created_at.sql      |   2 +
 db/migrations/0004_session_nonce.sql        |   2 +
 db/migrations/0005_refresh_token_create.sql |  21 ++
 db/migrations/0006_user_email_unique.sql    |   3 +
 db/migrations/assets.go                     | 377 ++++++++++++++++++++
 db/user.go                                  |   5 +-
 12 files changed, 575 insertions(+), 2 deletions(-)
 create mode 100644 db/migrate.go
 create mode 100644 db/migrate_test.go
 create mode 100644 db/migrations/0001_initial_migration.sql
 create mode 100644 db/migrations/0002_dex_admin.sql
 create mode 100644 db/migrations/0003_user_created_at.sql
 create mode 100644 db/migrations/0004_session_nonce.sql
 create mode 100644 db/migrations/0005_refresh_token_create.sql
 create mode 100644 db/migrations/0006_user_email_unique.sql
 create mode 100644 db/migrations/assets.go

diff --git a/build b/build
index ba9e9d5d..899ba9cb 100755
--- a/build
+++ b/build
@@ -3,6 +3,21 @@
 export GOPATH=${PWD}/Godeps/_workspace
 export GOBIN=${PWD}/bin
 
+if command -v go-bindata &>/dev/null; then
+  DEX_MIGRATE_FROM_DISK=${DEX_MIGRATE_FROM_DISK:=false}
+
+  echo "Turning migrations into ./db/migrations/assets.go"
+  if [ "$DEX_MIGRATE_FROM_DISK" = true ]; then
+    echo "Compiling assets.go in debug mode: migrations will be read from disk at runtime."
+  else
+    echo "Compiling migrations into assets.go"
+  fi
+  go-bindata -debug=$DEX_MIGRATE_FROM_DISK -modtime=1 -pkg migrations -o ./db/migrations/assets.go ./db/migrations
+  gofmt -w ./db/migrations/assets.go
+else
+  echo "Could not find go-bindata in PATH, will not regenerate db/migrations/assets.go"
+fi
+
 rm -rf $GOPATH/src/github.com/coreos/dex
 mkdir -p $GOPATH/src/github.com/coreos/
 ln -s ${PWD} $GOPATH/src/github.com/coreos/dex
diff --git a/db/client.go b/db/client.go
index dd04a18d..23cc6108 100644
--- a/db/client.go
+++ b/db/client.go
@@ -66,7 +66,7 @@ type clientIdentityModel struct {
 	ID       string `db:"id"`
 	Secret   []byte `db:"secret"`
 	Metadata string `db:"metadata"`
-	DexAdmin bool   `db:"dexAdmin"`
+	DexAdmin bool   `db:"dex_admin"`
 }
 
 func newClientMetadataJSON(cm *oidc.ClientMetadata) *clientMetadataJSON {
diff --git a/db/migrate.go b/db/migrate.go
new file mode 100644
index 00000000..bdb42366
--- /dev/null
+++ b/db/migrate.go
@@ -0,0 +1,59 @@
+package db
+
+import (
+	"fmt"
+
+	"github.com/coopernurse/gorp"
+	"github.com/lib/pq"
+	migrate "github.com/rubenv/sql-migrate"
+
+	"github.com/coreos/dex/db/migrations"
+)
+
+const (
+	migrationDialect = "postgres"
+	migrationTable   = "dex_migrations"
+	migrationDir     = "db/migrations"
+)
+
+func init() {
+	migrate.SetTable(migrationTable)
+}
+
+// MigrateToLatest applies all migrations that have not yet been applied,
+// returning the number of migrations performed.
+func MigrateToLatest(dbMap *gorp.DbMap) (int, error) {
+	source := getSource()
+
+	return migrate.Exec(dbMap.Db, migrationDialect, source, migrate.Up)
+}
+
+// MigrateMaxMigrations applies at most max pending migrations.
+func MigrateMaxMigrations(dbMap *gorp.DbMap, max int) (int, error) {
+	source := getSource()
+
+	return migrate.ExecMax(dbMap.Db, migrationDialect, source, migrate.Up, max)
+}
+
+// GetPlannedMigrations returns the migrations that MigrateToLatest would
+// apply, without applying them.
+func GetPlannedMigrations(dbMap *gorp.DbMap) ([]*migrate.PlannedMigration, error) {
+	migrations, _, err := migrate.PlanMigration(dbMap.Db, migrationDialect, getSource(), migrate.Up, 0)
+	return migrations, err
+}
+
+// DropMigrationsTable drops the table that sql-migrate uses to record which
+// migrations have been applied.
+func DropMigrationsTable(dbMap *gorp.DbMap) error {
+	qt := pq.QuoteIdentifier(migrationTable)
+	_, err := dbMap.Exec(fmt.Sprintf("drop table if exists %s ;", qt))
+	return err
+}
+
+func getSource() migrate.MigrationSource {
+	return &migrate.AssetMigrationSource{
+		Dir:      migrationDir,
+		Asset:    migrations.Asset,
+		AssetDir: migrations.AssetDir,
+	}
+}
diff --git a/db/migrate_test.go b/db/migrate_test.go
new file mode 100644
index 00000000..41c9b51f
--- /dev/null
+++ b/db/migrate_test.go
@@ -0,0 +1,42 @@
+package db
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/coopernurse/gorp"
+)
+
+func initDB(dsn string) *gorp.DbMap {
+	c, err := NewConnection(Config{DSN: dsn})
+	if err != nil {
+		panic(fmt.Sprintf("error making db connection: %q", err))
+	}
+	if err = c.DropTablesIfExists(); err != nil {
+		panic(fmt.Sprintf("Unable to drop database tables: %v", err))
+	}
+
+	return c
+}
+
+// TestGetPlannedMigrations is a sanity check, ensuring that at least one
+// migration can be found.
+func TestGetPlannedMigrations(t *testing.T) {
+	dsn := os.Getenv("DEX_TEST_DSN")
+	if dsn == "" {
+		t.Logf("Test will not run without DEX_TEST_DSN environment variable.")
+		return
+	}
+	dbMap := initDB(dsn)
+	ms, err := GetPlannedMigrations(dbMap)
+	if err != nil {
+		pwd, _ := os.Getwd()
+		t.Logf("pwd: %v", pwd)
+		t.Fatalf("unexpected err: %q", err)
+	}
+
+	if len(ms) == 0 {
+		t.Fatalf("expected non-empty migrations")
+	}
+}
diff --git a/db/migrations/0001_initial_migration.sql b/db/migrations/0001_initial_migration.sql
new file mode 100644
index 00000000..d2350b7a
--- /dev/null
+++ b/db/migrations/0001_initial_migration.sql
@@ -0,0 +1,47 @@
+-- +migrate Up
+CREATE TABLE IF NOT EXISTS "authd_user" (
+    "id" text not null primary key,
+    "email" text,
+    "email_verified" boolean,
+    "display_name" text,
+    "admin" boolean) ;
+
+CREATE TABLE IF NOT EXISTS "client_identity" (
+    "id" text not null primary key,
+    "secret" bytea,
+    "metadata" text);
+
+CREATE TABLE IF NOT EXISTS "connector_config" (
+    "id" text not null primary key,
+    "type" text, "config" text) ;
+
+CREATE TABLE IF NOT EXISTS "key" (
+    "value" bytea not null primary key) ;
+
+CREATE TABLE IF NOT EXISTS "password_info" (
+    "user_id" text not null primary key,
+    "password" text,
+    "password_expires" bigint) ;
+
+CREATE TABLE IF NOT EXISTS "session" (
+    "id" text not null primary key,
+    "state" text,
+    "created_at" bigint,
+    "expires_at" bigint,
+    "client_id" text,
+    "client_state" text,
+    "redirect_url" text, "identity" text,
+    "connector_id" text,
+    "user_id" text, "register" boolean) ;
+
+CREATE TABLE IF NOT EXISTS "session_key" (
+    "key" text not null primary key,
+    "session_id" text,
+    "expires_at" bigint,
+    "stale" boolean) ;
+
+CREATE TABLE IF NOT EXISTS "remote_identity_mapping" (
+    "connector_id" text not null,
+    "user_id" text,
+    "remote_id" text not null,
+    primary key ("connector_id", "remote_id")) ;
diff --git a/db/migrations/0002_dex_admin.sql b/db/migrations/0002_dex_admin.sql
new file mode 100644
index 00000000..b5651599
--- /dev/null
+++ b/db/migrations/0002_dex_admin.sql
@@ -0,0 +1,2 @@
+-- +migrate Up
+ALTER TABLE client_identity ADD COLUMN "dex_admin" boolean;
diff --git a/db/migrations/0003_user_created_at.sql b/db/migrations/0003_user_created_at.sql
new file mode 100644
index 00000000..976079bb
--- /dev/null
+++ b/db/migrations/0003_user_created_at.sql
@@ -0,0 +1,2 @@
+-- +migrate Up
+ALTER TABLE authd_user ADD COLUMN "created_at" bigint;
diff --git a/db/migrations/0004_session_nonce.sql b/db/migrations/0004_session_nonce.sql
new file mode 100644
index 00000000..2da5b392
--- /dev/null
+++ b/db/migrations/0004_session_nonce.sql
@@ -0,0 +1,2 @@
+-- +migrate Up
+ALTER TABLE session ADD COLUMN "nonce" text;
diff --git a/db/migrations/0005_refresh_token_create.sql b/db/migrations/0005_refresh_token_create.sql
new file mode 100644
index 00000000..acda8e29
--- /dev/null
+++ b/db/migrations/0005_refresh_token_create.sql
@@ -0,0 +1,21 @@
+-- +migrate Up
+CREATE TABLE refresh_token (
+    id bigint NOT NULL,
+    payload_hash text,
+    user_id text,
+    client_id text
+);
+
+CREATE SEQUENCE refresh_token_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+ALTER SEQUENCE refresh_token_id_seq OWNED BY refresh_token.id;
+
+ALTER TABLE ONLY refresh_token ALTER COLUMN id SET DEFAULT nextval('refresh_token_id_seq'::regclass);
+
+ALTER TABLE ONLY refresh_token
+    ADD CONSTRAINT refresh_token_pkey PRIMARY KEY (id);
diff --git a/db/migrations/0006_user_email_unique.sql b/db/migrations/0006_user_email_unique.sql
new file mode 100644 index 00000000..3a42a72c --- /dev/null +++ b/db/migrations/0006_user_email_unique.sql @@ -0,0 +1,3 @@ +-- +migrate Up +ALTER TABLE ONLY authd_user + ADD CONSTRAINT authd_user_email_key UNIQUE (email); diff --git a/db/migrations/assets.go b/db/migrations/assets.go new file mode 100644 index 00000000..33326353 --- /dev/null +++ b/db/migrations/assets.go @@ -0,0 +1,377 @@ +// Code generated by go-bindata. +// sources: +// db/migrations/0001_initial_migration.sql +// db/migrations/0002_dex_admin.sql +// db/migrations/0003_user_created_at.sql +// db/migrations/0004_session_nonce.sql +// db/migrations/0005_refresh_token_create.sql +// db/migrations/0006_user_email_unique.sql +// db/migrations/assets.go +// DO NOT EDIT! + +package migrations + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _dbMigrations0001_initial_migrationSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x9c\x93\xcf\x8a\xdb\x30\x10\xc6\xef\x79\x0a\x91\xd3\x86\x66\x9f\xa0\xa7\x6d\x71\x61\xa1\xb4\xd0\x75\xa1\x37\x31\xb5\x26\xee\x50\xfd\x43\x1a\x6f\xd7\x6f\x5f\xd9\x4e\x14\xc7\xce\xc6\x26\x81\x84\x58\xd6\xfc\xe6\x9b\x6f\x66\x1e\x1f\xc5\x07\x43\x75\x00\x46\xf1\xd3\x6f\x3e\xff\x28\x9e\xca\x42\x94\x4f\x9f\xbe\x16\xe2\xf9\x8b\xf8\xf6\xbd\x14\xc5\xaf\xe7\x97\xf2\x45\x6c\xa1\xe1\x3f\x4a\x36\x11\xc3\x56\x3c\x6c\xc4\xf0\xd9\x92\xda\x0a\xc6\x37\x16\xd6\xa5\x6f\xa3\xb5\xf0\x81\x0c\x84\x56\xfc\xc5\x76\x9f\xaf\xa1\x01\xd2\xc3\xcd\xc9\xa1\x7c\xc5\x40\x07\xc2\xc4\xf9\xed\x9c\x46\xb0\xe7\x0b\x8a\xa2\xd7\xd0\x4a\x0b\x06\xa7\xc1\xa0\x0c\xd9\x1c\xb3\x13\x1f\x37\x37\xd5\x57\x9a\xd0\xb2\x24\x95\x7e\x89\xdb\x3b\x4a\x88\x58\x05\xe4\x94\xb1\x65\x84\xf3\xb1\x41\x06\x05\x0c\x03\x63\xb7\x24\xc3\x59\x8b\x15\xbb\x20\xd3\xbf\x03\xd5\x77\xe8\xe0\xd6\x9f\xcc\xe8\x79\x3d\xa5\xcf\xbd\xe4\x41\xc2\x8c\xf3\xbd\x82\x6e\xf0\x58\xcf\xd5\x9c\x8b\x40\x0f\x31\xfe\x73\x41\x49\xb2\x07\x37\x46\x77\x53\x22\x57\xd6\x73\x82\x4c\x1b\x9c\xe1\xf8\xe6\x29\x60\x4c\x4a\xa9\x26\xbb\x5c\x66\xc4\x18\xc9\xd9\x7b\x5a\xcc\x69\x0f\xa6\x3a\x52\xd7\xd3\xa9\x92\xc0\x27\x05\xa3\x09\x1e\xa4\x5d\x7d\x97\x27\x6e\x06\x1c\x5e\x5c\x4d\x16\x50\x25\x60\xc5\xb2\x09\x3a\x37\xf9\x3c\xb4\x13\x50\x1e\xa6\x79\x92\x8b\x0e\xec\x3b\x70\x4d\x91\xbb\xdd\x5d\xbb\x31\x47\x1b\xe5\x64\x6a\xfa\xc7\x55\xeb\x32\x84\xcf\xa5\xdd\x32\x2d\x99\xa2\x71\xbd\xc6\x80\xc6\x31\xe6\xad\x96\x06\xbc\x27\x7b\xb1\x55\x73\x93\xb2\xf0\xf7\xdc\x1a\x75\xe3\x88\x7f\x2f\x70\x54\xb9\x78\xb8\x4c\xb5\x1f\x87\xef\xba\x4a\xfe\x07\x00\x00\xff\xff\xae\xbe\x65\x61\x6c\x05\x00\x00") + +func 
dbMigrations0001_initial_migrationSqlBytes() ([]byte, error) { + return bindataRead( + _dbMigrations0001_initial_migrationSql, + "db/migrations/0001_initial_migration.sql", + ) +} + +func dbMigrations0001_initial_migrationSql() (*asset, error) { + bytes, err := dbMigrations0001_initial_migrationSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "db/migrations/0001_initial_migration.sql", size: 1388, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dbMigrations0002_dex_adminSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\xce\xc9\x4c\xcd\x2b\x89\xcf\x4c\x01\x92\x99\x25\x95\x0a\x8e\x2e\x2e\x0a\xce\xfe\x3e\xa1\xbe\x7e\x0a\x4a\x29\xa9\x15\xf1\x89\x29\xb9\x99\x79\x4a\x0a\x49\xf9\xf9\x39\xa9\x89\x79\xd6\x5c\x80\x00\x00\x00\xff\xff\xfd\xb4\x6c\x60\x4b\x00\x00\x00") + +func dbMigrations0002_dex_adminSqlBytes() ([]byte, error) { + return bindataRead( + _dbMigrations0002_dex_adminSql, + "db/migrations/0002_dex_admin.sql", + ) +} + +func dbMigrations0002_dex_adminSql() (*asset, error) { + bytes, err := dbMigrations0002_dex_adminSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "db/migrations/0002_dex_admin.sql", size: 75, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dbMigrations0003_user_created_atSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\x52\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\x4a\x2e\x4a\x05\x2a\x4e\x89\x4f\x2c\x51\x52\x48\xca\x4c\xcf\xcc\x2b\xb1\xe6\x02\x04\x00\x00\xff\xff\xf4\xa1\xf0\x95\x46\x00\x00\x00") + +func dbMigrations0003_user_created_atSqlBytes() ([]byte, error) { + return bindataRead( + _dbMigrations0003_user_created_atSql, + "db/migrations/0003_user_created_at.sql", + ) +} + +func dbMigrations0003_user_created_atSql() (*asset, error) { + bytes, err := dbMigrations0003_user_created_atSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "db/migrations/0003_user_created_at.sql", size: 70, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dbMigrations0004_session_nonceSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4e\x2d\x2e\xce\xcc\xcf\x53\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\xca\xcb\xcf\x4b\x4e\x55\x52\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x77\x11\x16\x8b\x3c\x00\x00\x00") + +func dbMigrations0004_session_nonceSqlBytes() ([]byte, error) { + return bindataRead( + _dbMigrations0004_session_nonceSql, + "db/migrations/0004_session_nonce.sql", + ) +} + +func dbMigrations0004_session_nonceSql() (*asset, error) { + bytes, err := dbMigrations0004_session_nonceSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "db/migrations/0004_session_nonce.sql", size: 60, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dbMigrations0005_refresh_token_createSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x84\x90\x51\x4f\xc2\x30\x10\xc7\xdf\xf7\x29\xee\x0d\x88\x62\xc2\x2b\x3e\x95\xed\x0c\x8b\x5d\xa7\x5d\x2b\xf2\xb4\x54\x56\x59\xc3\x1c\x73\xad\x46\xbe\xbd\x65\x13\x08\xc4\x68\xdf\xfa\xbb\xfb\xdf\x2f\x77\xe3\x31\x5c\xbd\x99\x75\xab\x9c\x06\xd9\x04\x21\x47\x22\x10\x04\x99\x51\x84\x56\xbf\xb6\xda\x96\xb9\xdb\x6e\x74\x0d\xc3\x00\xfc\x33\x05\xbc\x98\xb5\xa9\x1d\xb0\x54\x00\x93\x94\x5e\x77\xbc\x51\xbb\x6a\xab\x8a\xbc\x54\xb6\x04\xa7\xbf\x5c\x8f\x3f\xac\x6e\x73\x9f\x39\x91\x55\x65\x74\xed\x0e\x2c\x18\xdd\x06\x07\x69\x86\x8f\x12\x59\x78\xe1\xf5\x9d\xb9\xd5\xef\x5d\x36\x13\x84\x0b\x58\xc4\x62\x0e\x93\x0e\xc4\xcc\x67\x13\x64\x02\x66\xcb\x1f\xc4\x52\x48\x62\xf6\x44\xa8\xc4\xe3\x9f\x3c\x9f\xfe\x21\x09\xe7\x08\x13\xaf\x25\x54\x20\xff\xdb\x0a\xe9\x82\x61\xb4\x1f\x7e\x56\xbd\x31\xc5\x31\xdf\x9f\x2a\x65\xf4\xa2\x07\xfa\x72\x98\x52\x99\xb0\xfd\xd9\x32\x14\x10\xe1\x1d\x91\x54\x40\xed\x57\xff\x54\xd5\x70\xf0\x9b\x74\x30\x9d\xb6\x7a\xbd\xaa\x94\xb5\xa3\x7f\x35\xdd\x4e\x24\x8a\xbc\x88\x65\x82\x93\xd8\xdf\xe2\x7c\x68\xb3\xd1\x3b\x78\xe0\x71\x42\xf8\x12\xee\x71\x09\x43\x53\xf8\xb9\xdf\x01\x00\x00\xff\xff\xff\xeb\x3d\xc4\xf9\x01\x00\x00") + +func dbMigrations0005_refresh_token_createSqlBytes() ([]byte, error) { + return bindataRead( + _dbMigrations0005_refresh_token_createSql, + "db/migrations/0005_refresh_token_create.sql", + ) +} + +func dbMigrations0005_refresh_token_createSql() (*asset, error) { + bytes, err := dbMigrations0005_refresh_token_createSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "db/migrations/0005_refresh_token_create.sql", size: 505, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dbMigrations0006_user_email_uniqueSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\xf0\xf7\xf3\x89\x54\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\xe2\x52\x00\x02\x47\x17\x17\x05\x67\x7f\xbf\xe0\x90\x20\x47\x4f\xbf\x10\x24\xd9\xf8\xd4\xdc\xc4\xcc\x9c\xf8\xec\xd4\x4a\x85\x50\x3f\xcf\xc0\x50\x57\x05\x0d\xb0\x88\xa6\x35\x17\x20\x00\x00\xff\xff\x18\x48\x0d\x30\x63\x00\x00\x00") + +func dbMigrations0006_user_email_uniqueSqlBytes() ([]byte, error) { + return bindataRead( + _dbMigrations0006_user_email_uniqueSql, + "db/migrations/0006_user_email_unique.sql", + ) +} + +func dbMigrations0006_user_email_uniqueSql() (*asset, error) { + bytes, err := dbMigrations0006_user_email_uniqueSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "db/migrations/0006_user_email_unique.sql", size: 99, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dbMigrationsAssetsGo = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd4\x99\x5b\x6f\xdb\x46\xf6\xc0\x9f\xad\x4f\xc1\x1a\x68\x21\xfd\xe1\xbf\x4c\xf1\x22\x8a\x06\xfa\xd2\x26\x0b\xe4\xa1\x29\xb0\xcd\x3e\xad\x17\xc2\x90\x1c\xba\x44\x65\xc9\x11\xe5\xae\x93\x20\xdf\x7d\xcf\x6f\xce\x91\xad\xc4\x92\x9d\x38\x1b\x64\xfb\x40\x8b\x9c\x39\x73\xe6\xdc\x6f\x3e\x3d\x8d\x7e\x5e\x35\x3e\xba\xf0\x4b\xbf\x76\x1b\xdf\x44\xd5\x9b\xe8\x62\xf5\xff\x55\xb7\x6c\xdc\xc6\x8d\x07\x02\xd0\xaf\xae\xd7\xb5\xef\xcf\x78\x6f\xaa\xd3\xcb\xee\x42\x20\xbb\xd5\xb2\x3f\x8d\xe3\x78\x32\xef\x96\xdd\xa6\x73\x8b\xf9\xed\xfa\xb8\x7f\xbd\xd8\x0b\x9b\xcc\x1b\x7f\x33\x77\xcd\x65\x77\x18\x26\x9d\x5f\xf7\x7e\x3d\xaf\xd7\x1e\x6a\xe6\x6e\x73\x10\x32\x9b\xf7\xbe\xef\xe5\x6b\xbe\x5c\x2d\x6b\x7f\x10\x2e\x9f\xaf\x7d\xbb\xf6\xfd\xef\xf3\xcd\xea\x0f\xbf\x34\xd4\x07\xc1\xa7\x4a\x80\xbf\x74\xdd\x62\x7e\xbd\xec\x5e\x5f\x1f\x80\x75\x7d\xef\x37\xfd\xf8\x62\xc5\xd6\xb3\x5f\xa3\x97\xbf\xbe\x8a\x9e\x3f\x7b\xf1\xea\xbb\xc1\xe0\xca\xd5\x7f\xb8\x0b\x1f\xdd\x41\x0f\x06\xdd\xe5\xd5\x6a\xbd\x89\x86\x83\xa3\xe3\xea\xcd\xc6\xf7\xc7\xf2\x52\xaf\x2e\xaf\x84\xb2\xfe\xf4\xe2\x6d\x77\xc5\x42\x7b\xb9\xe1\xa7\x5b\xf1\xb7\xdf\xac\xbb\xe5\x45\x00\x5c\x85\xbf\x9b\xee\xd2\xeb\xf6\x69\xb7\xba\xde\x74\x0b\x3e\xae\xdc\xe6\xf7\xd3\xb6\x5b\x78\x5e\x8e\x07\xa3\xc1\xa0\xbd\x5e\xd6\x91\x29\xf0\xef\xde\x35\x43\x5e\xa2\x7f\xfe\x8b\x6b\x4f\xa2\xa5\xbb\xf4\x91\xa2\x1e\x45\xc3\xed\xaa\x5f\xaf\x57\xeb\x51\xf4\x6e\x70\x74\xf1\x36\x7c\x45\x67\x3f\x46\x50\x35\x7e\xe9\xff\x0d\x12\xbf\x1e\x06\xb2\xf9\xfe\xe9\xba\x6d\xe5\x1b\xb4\xa3\xd1\xe0\xa8\x6b\xc3\x81\xef\x7e\x8c\x96\xdd\x02\x14\x47\x6b\xbf\xb9\x5e\x2f\xf9\x3c\x89\x84\xa5\xf1\x73\xb0\xb7\xc3\x63\x10\x45\xdf\xbf\x3e\x8b\xbe\xff\xf3\x58\x29\x09\x77\x09\x8e\xf7\x83\xc1\xd1\x9f\x6e\x1d\x55\xd7\x6d\xa4\xf7\xe8\x25\x83\xa3\xb9\x92\xf3\x63\xd4\xad\xc6\x3f\xaf\xae\xde\x0c\x7f\x10\x98\x13\xa1\x4d\x4e\xd5\x8b\xe7\x5b\x4a\xc7\x3f\x2f\x56\xbd\x1f\x0a\xfb\xff\x25\x7a\x40\xa3\xf8\x0f\x20\x12\x40\xa5\xdb\x16\x85\xac\xf1\x4f\x90\x3e\x1c\x9d\x00\x31\x90\xbd\xcd\x9b\x2b\x1f\x05\x43\x41\xe4\xd7\xf5\x06\x2c\x81\x3f\xd3\x87\x5c\xb3\x6c\x57\x51\xb4\xea\xc7\x7f\x13\x1d\xbe\x90\x8f\xdb\x73\xa6\xc2\xed\xfa\x0e\x86\x1d\x1d\x0e\x8e\xfa\xee\xad\x8f\xba\xe5\x66\x9a\x0d\x8e\x2e\xf1\x65\xc3\xf5\x8b\xbc\x87\x95\x57\x62\x36\x11\xb6\x33\xe6\x0d\xf4\xc1\x42\x86\x6d\xf7\xf1\x15\xa3\xe8\xa5\x60\x1e\x8e\x0c\x37\x57\x19\x73\x6d\x37\xe6\x52\x39\x7c\xf8\xec\x6f\x42\x88\x9c\x0d\xa4\x7c\x78\x14\x12\x1f\x3c\x0a\xad\x72\x74\x87\xf2\x0f\x11\xc0\xd7\x63\x08\x60\x4e\x70\xdc\x32\x7a\x0f\x83\x71\x7f\x18\xc9\x8b\xfe\x59\xb7\x16\x14\xd5\x6a\xb5\xd8\x3d\xed\x16\xfd\x23\x9c\xbf\xe9\x95\x71\xbf\x6e\x5d\xed\xdf\xbd\xdf\x39\x6d\x96\x80\x71\xcf\x9b\xea\x97\xdb\x88\xb0\x3f\x76\xfe\xf6\x7a\x21\xa6\xae\xb6\x31\x3c\x3e\xbf\x99\xb4\xe7\x37\xb3\xea\xfc\x26\x9e\xc9\x13\xdb\x53\x9e\xdf\x4c\xbd\xac\xdb\x5a\x2b\x30\x65\x2d\x4f\x7a\x7e\x53\x03\xef\xce\x6f\x1a\x39\x93\xca\xde\x44\x9e\x7a\x7a\x7e\xe3\x65\xbd\x90\x73\xb1\xec\x95\x13\xd9\x17\xd8\x99\xac\x4f\xe5\x29\x65\xcf\x09\x9c\x2b\xe4\xbb\x11\x38\xd9\x9f\xca\xe3\xe4\xa9\x32\x81\x95\xbd\x22\xd7\xef\x54\x60\x52\xd6\xe5\x3b\x01\xaf\xd0\x91\x43\x83\x9c\xcb\x04\xe7\x44\xf0\x4f\x05\x5f\x53\xe8\x6f\xce\xbb\xdc\x9b\x09\xdc\x44\x70\xd5\xb2\x5e\x7b\xa5\x89\xf3\x33\xc1\x35\x93\xf5\x5c\x78\x69\xe4\xbb\x15\x3e\x3c\x34\x55\x7a\x1e\xfa\x26\x5e\xe5\x50\xcb\x9d\x71\xa1\xf7\x40\x0f\xbc\x67\x9c\x31\x7e\x80\x4f\xbd\xca\x23\x11\x7c\x25\xf7\x08\x3d\x59\x22\xef\x72\x47\xd6\x2a\xaf\x15\xf8\xa0\x5d\xd6\xdb\x52\xe5\xdb\x0a\x7c\x2b\x6b\x55\x63\x74\xc2\xaf\xc0\x7a\xb9\xaf\x94\xa7\x15\xd8\x4c\xd6\xa6\xb
5\xca\xc1\x23\x0b\xd9\xcf\x04\x7f\x0a\x8d\xf2\x5d\x0b\x0d\xb9\xbc\xa7\xb5\xc2\xd5\x82\xa7\x8d\x95\xff\x52\xce\x37\x4e\xe5\x0f\xef\xc8\x11\x1a\xe0\xd9\xe5\x4a\xbb\x4b\x55\xae\x9c\x41\x2e\x71\x6d\xb2\x31\xb9\x40\x53\xb1\xd5\x57\xab\x74\xc4\xf0\x9e\xe8\xdd\xb5\xd0\x5e\x97\xba\x5e\x18\x7c\x16\xab\xcc\xea\xc4\xe8\x91\x7d\xdf\xa8\xbd\x20\xbb\x2a\x56\x1e\xe3\x4a\xed\xc6\x25\xaa\x3b\x74\x0e\x6f\xac\xc7\x53\xb5\x8d\x7a\xa2\x76\x02\x5d\xf0\x34\x91\xdf\x0a\x9d\xa7\xaa\x9f\x60\x1b\x9c\x95\x7b\x73\xe4\xe6\x14\x4f\x25\x38\x13\xb9\xbb\x84\x7e\xec\xb6\x34\xfb\xac\xf4\x0e\xec\x18\xfd\xc7\xb2\xef\x33\xd5\x55\x85\x0d\xe6\xca\x7f\x9b\xea\x77\x36\x51\x5a\x80\x83\x86\x18\x9d\xb3\x57\x28\xfe\x20\x7f\xd3\xe7\x44\x60\x2a\xf9\x4d\x62\xb5\x8d\xaa\x55\x78\x68\x2b\x04\xde\xcb\x7d\x3e\x56\xf9\x23\xcf\xba\xd6\x35\x7c\x24\x6d\x54\x27\xf8\x12\xf6\x00\x4d\xdc\x8d\x0c\x67\xb5\xd1\x23\xeb\xb3\x44\xfd\x10\x7d\x41\x3f\xf0\xe0\x47\xb6\xd0\x10\x64\xdf\xaa\xbf\xb4\x02\x57\xa4\x8a\x27\x2b\x55\x26\xb1\xc9\x0b\x3d\x40\x53\x2e\xfb\x49\xa2\xb2\x83\x2e\xe0\xb9\x23\x13\x7c\x93\x4a\xf1\x62\x77\xd8\x2a\x3e\x92\x10\x07\x62\xdd\x47\x8e\xae\x54\x9f\x82\xef\xbc\x56\xdf\x41\xe7\x93\x99\xda\x05\x3a\x2b\xd8\x73\xca\xef\xb4\x54\xfa\x1c\xf6\x28\x6b\xf9\x56\xf7\xa9\xe2\xc2\x66\x6b\xc1\x9f\x14\x2a\x73\xec\x93\xd8\x83\x3f\x3a\xec\x92\x58\xd1\xa8\x7f\x00\x83\x2c\xd0\x0f\x76\x91\x7b\xa5\x21\x6b\xd4\xce\x89\x11\x49\xae\xf4\x62\x97\x81\xff\x52\xef\x45\x06\xd8\x25\x7e\x43\xbc\xc1\x9e\x80\x0f\xf7\x4c\x55\x27\xd0\x12\xe2\xa0\xac\xf9\x5a\x6d\xa7\x88\x15\x3f\xf1\x0c\x9e\x9b\x46\x69\xe2\x1d\x99\x67\x85\xca\xcd\x73\xaf\xe0\x9d\xca\x5d\xad\xd3\x18\x94\x63\x6b\xc0\x25\x6a\x63\xe8\x0e\x9d\x83\x83\xb5\x04\xbc\xa5\xfa\x03\xf1\x10\x7d\xe3\xb7\x33\x8b\xa7\xe0\x47\x07\x0e\xb8\xa9\xf2\x5d\xd5\x2a\x07\x64\x8c\xbd\x72\x07\x3a\x24\x2e\xc3\x2f\x76\xd2\x22\xdf\x5a\xe3\x24\x31\xcc\xa7\x6a\xff\x05\x31\xab\x55\x9e\x72\xec\x1f\x5d\xc9\x7a\x35\x53\xfd\x12\x17\x42\x2e\x28\x34\x96\x57\x4e\x75\x8e\x5d\x61\x47\xdb\xdc\x40\xec\xe3\x71\x5e\xe3\x1c\x3e\x44\x2c\x27\x1e\xc5\xf9\x16\xee\x78\x5b\x25\x7e\x52\x42\xb2\xa2\x66\x5f\xb1\xb8\x2d\x7d\x76\x8a\x4d\xa9\x92\x3e\x2d\xcf\x9d\x08\xe4\xf1\xa7\xf6\x13\xc7\x02\x3d\xba\x2d\x5d\x3e\x09\x3f\x14\xff\x5f\xa8\xbd\x76\x29\x0e\xc5\xd7\x6d\x85\xfb\x39\xfc\x3f\x56\x58\xde\xd6\x83\xa1\xa2\x13\xe4\x1f\x95\x09\xef\x28\xa0\xce\xa2\xcf\x60\x39\xa2\x6e\x3a\x8b\x26\xe9\x6c\x76\x12\x51\x02\x9d\xed\x56\x48\xc3\x2c\x89\x47\x61\x9d\xc2\xe6\x4c\x0b\x9f\x7f\x2c\xbb\x9b\xe1\xe4\x24\x8a\x47\x52\xc1\x3a\xa8\xf8\x21\x88\xe0\x5d\xe0\xfb\x2c\x32\xf6\x21\xf1\x2c\x0a\x3f\xef\x6f\xb5\xe8\x4e\x1e\x2a\x56\x76\x9a\xb7\xa7\x16\x29\x24\xaa\x90\x1c\x72\x4d\x0c\xa1\x10\x68\xd4\xc0\x13\x4b\xa6\x49\xad\x01\x12\x18\xf0\xe1\x88\x04\xea\x02\x07\xca\x14\x2f\x0e\x19\x37\x1a\xb0\x80\xe1\xbb\xc8\x34\x49\x17\xe6\x78\xd9\xcc\xf0\x97\x8a\x9f\x7b\x92\x4a\x93\x0f\xce\xce\x1a\xc9\x93\xc0\x82\xa3\x13\x9c\x48\x50\x24\xe8\x99\x9c\x4b\xec\x09\x09\xdb\xab\xa3\x51\x5c\x84\xa2\xc8\x6b\x22\x8b\xcd\x09\x09\xc4\x04\x4c\x92\x0e\xc5\x08\x77\xb0\x86\x13\x83\x9b\x00\x96\x59\xf2\x87\xb7\xd6\x9e\xd4\xce\xcd\x0c\x86\xa0\x4f\xd0\x9c\xc5\x3b\x72\xdc\x71\x6a\x8a\x2c\x02\x24\xce\x1c\x02\x7f\xb5\x0b\x77\xc8\xa9\x3f\x54\xdc\x17\x3b\xf3\x87\xe8\xf6\x3b\xf1\x47\x8d\xfe\x83\xce\xfb\x21\xbe\x27\x38\xed\x5e\xfe\xbe\x9a\xb3\xde\x63\xcd\x9c\xb4\xc8\xbf\xbd\x8b\xde\x9b\x9d\xfc\x55\x1c\x15\x5c\x89\x55\xaf\x7c\x07\x07\x6a\x75\x8d\xce\x21\x31\x1c\x64\x47\xce\x87\xb3\x13\xfd\xa6\x6a\x27\xab\x72\x5f\x9b\xeb\x93\x5b\x05\x11\xe8\xf4\xe6\x7c\x74\x2c\x4e\xf1\x81\x9f\x6e\x80\x7b\xc1\x03\xee\x10\x30\x9c\x05\x0b\xab\x08\x13\xcb\xe4\x64\x
fa\x18\x1e\xb2\x3d\x4e\x99\x69\x50\x20\xbb\x13\x40\xa8\xe6\x1e\x77\xca\x7d\xaa\xfa\x62\xd7\xdc\x87\x74\xbf\x83\xee\x9d\xb2\x3d\xe8\xa6\xfb\x70\x3f\xc1\x59\x1f\xe0\xfb\xab\xb9\xec\x01\x66\xb7\x8e\x1b\x7f\x7b\xc7\xfd\x68\x94\xf9\x57\x70\x5b\x1a\xec\xad\x6b\xe2\x62\xe1\x2e\x73\x1d\xdc\xef\x73\x5d\x15\xd7\xab\x2b\xcb\xcd\x95\xe2\xe6\x1e\x68\x08\x77\x95\x5a\x6c\xf3\xfe\x98\x4b\xd2\x4c\xd1\x90\xd3\x78\x20\x37\x1a\x8b\xc7\x5d\xf2\xbe\x12\xbe\xd8\x21\xef\xa3\xdc\xef\x8e\x7b\x46\xd9\x0f\x3a\xe3\x7d\xbc\x4f\x70\xc5\x83\xfc\x7e\x35\x47\xdc\xcb\xa6\xb9\xe1\xf4\x7f\xc0\x0d\xf7\xff\xa7\xe0\xa9\xde\x48\x5b\x59\xc6\x6a\xfd\x24\x1b\xc6\x07\xb7\x23\x39\xda\xc0\x56\xbd\x81\x4a\x91\x31\x1a\x1e\xc7\xf9\x69\xa2\xb0\x24\x1f\x2a\x4e\x92\x0a\x23\x9b\xd8\xc6\x0a\xb4\xb8\x8c\x63\x72\xab\x68\xf1\x4a\xaa\xc2\xdc\x46\x45\x8c\x40\x18\x85\xd0\x86\xd3\x7e\xe2\x35\x61\xd4\xe3\xb5\x8d\xa5\x25\xa4\xc5\x86\x7e\xc6\x03\xd3\x99\xd1\xe2\xb4\x75\x6e\x2b\xfd\x4e\xcc\x93\x68\x51\x69\x77\xa9\x4c\x2b\x6b\x8b\xc3\x38\xd0\x46\x12\xb4\xbe\x8c\x16\xf0\xc4\xc4\xda\x6e\xc6\x18\xf0\xc9\x1a\xf0\xf9\xe4\x6e\x94\xc5\x28\xa6\x9a\xea\x38\x8c\xd6\xb9\xb2\x71\x10\xf2\x23\x62\x20\x07\xe8\x0f\x72\xa4\xd5\x4f\x35\x79\xd3\x5e\x97\x33\x6d\x85\x43\xb5\xdd\xe8\xf8\x2a\xb7\x08\x40\x9b\xcd\xe8\x88\xd1\x03\x74\x57\x96\xdc\xe1\x89\xd6\xdf\xd9\x88\x2b\xac\x67\x4a\x03\xf4\x21\x4b\x68\x0a\x95\x77\xab\x63\x3d\x57\x1b\x3d\xa9\x8e\x87\xa8\xd4\x4b\x8b\x7c\xc8\x0f\x3a\x83\x4e\xbc\x46\x54\x46\x2c\x8c\x0e\xc2\x78\xaa\xd0\xd1\x0a\xe3\x55\xf0\x4d\x12\xd5\x09\xed\x7c\x18\xdf\xd0\x69\x34\xc6\x77\xae\xed\x3c\xba\x0c\x63\xc4\x54\xe5\xc4\xd8\x2d\xdf\xea\x27\xd1\x7b\xe0\x8f\x5f\xd6\xc2\xf8\xa6\x50\x78\x46\x1b\x44\xc2\x30\xee\xa9\x6c\x54\x9a\xdd\x15\x35\x9c\x27\xf2\x66\xf0\x6a\x38\xfd\x96\xaf\x5a\x7f\xe9\x6e\xd0\x1d\x76\x1c\xc6\x81\x33\xc5\xcd\x98\x91\xce\x08\x79\x25\xdb\x2c\x53\x69\x27\xe3\x4b\x1b\x81\xd9\x98\x98\x7b\xe9\x8a\x72\x1b\xa3\x62\x33\x8c\x7a\xf8\x6d\x6c\xf4\x4a\x01\x86\xfc\x42\xd1\x64\x23\x30\xec\x8e\xec\x83\x6e\x73\xeb\xc6\x2a\x1b\x29\x63\x57\x8c\x63\x18\xc9\x62\x4f\xc8\x6f\x62\x23\x1f\x68\x62\xc4\x86\x1e\x18\x99\x41\x1b\x6b\x61\xc4\x17\x5b\x41\x56\xa9\xae\xf0\xbb\x20\x73\xc6\x49\x4e\xe9\x73\xce\x46\x8d\x36\xda\x62\x04\x93\xda\x08\x28\x64\xb6\xec\xce\x5e\x82\x5f\xe6\xca\x2f\x7a\x68\xcc\x67\x18\x21\x33\x6a\xc5\x87\x18\x85\x36\x13\x1b\x4d\xd9\x78\x91\xec\xc7\xf8\x99\x91\x1d\x76\x80\x9f\x17\x26\x67\xc6\xd8\x64\x3f\xf6\x82\x2d\xb4\xda\x91\xde\x2b\x30\x5b\x1b\x55\x35\x77\x23\xdc\x3b\xb8\x43\xd9\xec\x60\x2c\xfb\xe2\xa4\x76\x10\xf3\xfe\xdc\x76\xf8\xdf\xaf\x0f\xa6\xb8\x83\xb7\x3c\x21\xd3\x3d\x26\x8b\xaf\x96\xf0\x1e\xe2\xdd\xf2\x5e\x1e\x7f\xd3\xc6\xf1\x3f\x01\x00\x00\xff\xff\x49\xb9\xe8\x2d\x00\x20\x00\x00") + +func dbMigrationsAssetsGoBytes() ([]byte, error) { + return bindataRead( + _dbMigrationsAssetsGo, + "db/migrations/assets.go", + ) +} + +func dbMigrationsAssetsGo() (*asset, error) { + bytes, err := dbMigrationsAssetsGoBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "db/migrations/assets.go", size: 16384, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "db/migrations/0001_initial_migration.sql": dbMigrations0001_initial_migrationSql, + "db/migrations/0002_dex_admin.sql": dbMigrations0002_dex_adminSql, + "db/migrations/0003_user_created_at.sql": dbMigrations0003_user_created_atSql, + "db/migrations/0004_session_nonce.sql": dbMigrations0004_session_nonceSql, + "db/migrations/0005_refresh_token_create.sql": dbMigrations0005_refresh_token_createSql, + "db/migrations/0006_user_email_unique.sql": dbMigrations0006_user_email_uniqueSql, + "db/migrations/assets.go": dbMigrationsAssetsGo, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "db": &bintree{nil, map[string]*bintree{ + "migrations": &bintree{nil, map[string]*bintree{ + "0001_initial_migration.sql": &bintree{dbMigrations0001_initial_migrationSql, map[string]*bintree{}}, + "0002_dex_admin.sql": &bintree{dbMigrations0002_dex_adminSql, map[string]*bintree{}}, + "0003_user_created_at.sql": &bintree{dbMigrations0003_user_created_atSql, map[string]*bintree{}}, + "0004_session_nonce.sql": &bintree{dbMigrations0004_session_nonceSql, map[string]*bintree{}}, + "0005_refresh_token_create.sql": &bintree{dbMigrations0005_refresh_token_createSql, map[string]*bintree{}}, + "0006_user_email_unique.sql": &bintree{dbMigrations0006_user_email_uniqueSql, map[string]*bintree{}}, + "assets.go": &bintree{dbMigrationsAssetsGo, map[string]*bintree{}}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/db/user.go b/db/user.go index cff1d3e9..50ee8593 100644 --- a/db/user.go +++ b/db/user.go @@ -14,7 +14,10 @@ import ( ) const ( - userTableName = "dex_user" + // This table is named authd_user for historical reasons; namely, that the + // original name of the project was authd, and there are existing tables out + // there that we don't want to have to rename in production. + userTableName = "authd_user" remoteIdentityMappingTableName = "remote_identity_mapping" ) From 84bc8073deeb21099120540b9f7397329df6e4d5 Mon Sep 17 00:00:00 2001 From: Bobby Rullo Date: Wed, 19 Aug 2015 15:40:59 -0700 Subject: [PATCH 3/4] functional: Test DBs use migrations All repo tests build their tables by applying all the migrations. This way we know our migrations are functional. 
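(Aside for readers: the test helpers below drive the db/migrate.go API introduced in the previous patch. A minimal, self-contained sketch of that flow follows; the wrapper name migrateFresh and the reuse of DEX_TEST_DSN are illustrative assumptions, not part of the patch.)

package main

import (
	"fmt"
	"os"

	"github.com/coreos/dex/db"
)

// migrateFresh (hypothetical helper) rebuilds a database the way the updated
// test helpers do: connect, drop all mapped tables plus sql-migrate's
// bookkeeping table, then replay every migration from scratch.
func migrateFresh(dsn string) error {
	dbMap, err := db.NewConnection(db.Config{DSN: dsn})
	if err != nil {
		return fmt.Errorf("connecting: %v", err)
	}
	if err := dbMap.DropTablesIfExists(); err != nil {
		return fmt.Errorf("dropping tables: %v", err)
	}
	if err := db.DropMigrationsTable(dbMap); err != nil {
		return fmt.Errorf("dropping migrations table: %v", err)
	}
	n, err := db.MigrateToLatest(dbMap)
	if err != nil {
		return fmt.Errorf("migrating: %v", err)
	}
	fmt.Printf("applied %d migrations\n", n)
	return nil
}

func main() {
	if err := migrateFresh(os.Getenv("DEX_TEST_DSN")); err != nil {
		panic(err)
	}
}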
---
 functional/db_test.go       | 8 ++++++--
 functional/repo/testutil.go | 8 ++++++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/functional/db_test.go b/functional/db_test.go
index f4bc53eb..dbb7df70 100644
--- a/functional/db_test.go
+++ b/functional/db_test.go
@@ -40,10 +40,14 @@ func connect(t *testing.T) *gorp.DbMap {
 		t.Fatalf("Unable to drop database tables: %v", err)
 	}
 
-	if err = c.CreateTablesIfNotExists(); err != nil {
-		t.Fatalf("Unable to create database tables: %v", err)
+	if err = db.DropMigrationsTable(c); err != nil {
+		panic(fmt.Sprintf("Unable to drop migration table: %v", err))
 	}
 
+	if _, err = db.MigrateToLatest(c); err != nil {
+		panic(fmt.Sprintf("Unable to migrate database: %v", err))
+	}
+
 	return c
 }
diff --git a/functional/repo/testutil.go b/functional/repo/testutil.go
index 63bc963f..8b2d0b79 100644
--- a/functional/repo/testutil.go
+++ b/functional/repo/testutil.go
@@ -18,8 +18,12 @@ func initDB(dsn string) *gorp.DbMap {
 		panic(fmt.Sprintf("Unable to drop database tables: %v", err))
 	}
 
-	if err = c.CreateTablesIfNotExists(); err != nil {
-		panic(fmt.Sprintf("Unable to create database tables: %v", err))
+	if err = db.DropMigrationsTable(c); err != nil {
+		panic(fmt.Sprintf("Unable to drop migration table: %v", err))
 	}
+
+	if _, err = db.MigrateToLatest(c); err != nil {
+		panic(fmt.Sprintf("Unable to migrate database: %v", err))
+	}
 	return c
 }

From 8b6a2699d9704394082eb70333e0964e1ec67224 Mon Sep 17 00:00:00 2001
From: Bobby Rullo
Date: Wed, 19 Aug 2015 16:23:23 -0700
Subject: [PATCH 4/4] cmd/dex-overlord, db: migrations in overlord

Migrations happen only in the overlord, so there's no thundering herd,
and database initialization can be more easily controlled.
---
 cmd/dex-overlord/main.go | 20 +++++++++++++++++++-
 db/conn.go               | 12 ------------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/cmd/dex-overlord/main.go b/cmd/dex-overlord/main.go
index 263297b1..419b6f2c 100644
--- a/cmd/dex-overlord/main.go
+++ b/cmd/dex-overlord/main.go
@@ -15,6 +15,7 @@ import (
 	"github.com/coreos/dex/db"
 	pflag "github.com/coreos/dex/pkg/flag"
 	"github.com/coreos/dex/pkg/log"
+	ptime "github.com/coreos/dex/pkg/time"
 	"github.com/coreos/dex/server"
 	"github.com/coreos/dex/user"
 )
@@ -29,14 +30,17 @@ func main() {
 	fs := flag.NewFlagSet("dex-overlord", flag.ExitOnError)
 	secret := fs.String("key-secret", "", "symmetric key used to encrypt/decrypt signing key data in DB")
 	dbURL := fs.String("db-url", "", "DSN-formatted database connection string")
+
+	dbMigrate := fs.Bool("db-migrate", true, "perform database migrations when starting up the overlord; this includes creating the initial DB objects")
+
 	keyPeriod := fs.Duration("key-period", 24*time.Hour, "length of time for-which a given key will be valid")
 	gcInterval := fs.Duration("gc-interval", time.Hour, "length of time between garbage collection runs")
 	adminListen := fs.String("admin-listen", "http://0.0.0.0:5557", "scheme, host and port for listening for administrative operation requests ")
+	localConnectorID := fs.String("local-connector", "local", "ID of the local connector")
 	logDebug := fs.Bool("log-debug", false, "log debug-level information")
 	logTimestamps := fs.Bool("log-timestamps", false, "prefix log lines with timestamps")
-	localConnectorID := fs.String("local-connector", "local", "ID of the local connector")
 
 	if err := fs.Parse(os.Args[1:]); err != nil {
 		fmt.Fprintln(os.Stderr, err.Error())
@@ -74,6 +78,20 @@ func main() {
 		log.Fatalf(err.Error())
 	}
 
+	if *dbMigrate {
+		var sleep time.Duration
+		for {
+			migrations, err := db.MigrateToLatest(dbc)
+			if err == nil {
+				log.Infof("Performed %d db migrations", migrations)
+				break
+			}
+			sleep = ptime.ExpBackoff(sleep, time.Minute)
+			log.Errorf("Unable to migrate database, retrying in %v: %v", sleep, err)
+			time.Sleep(sleep)
+		}
+	}
+
 	userRepo := db.NewUserRepo(dbc)
 	pwiRepo := db.NewPasswordInfoRepo(dbc)
 	userManager := user.NewManager(userRepo,
diff --git a/db/conn.go b/db/conn.go
index c4c2a8e4..6baa5e32 100644
--- a/db/conn.go
+++ b/db/conn.go
@@ -5,13 +5,11 @@ import (
 	"errors"
 	"fmt"
 	"strings"
-	"time"
 
 	"github.com/coopernurse/gorp"
 	_ "github.com/lib/pq"
 
 	"github.com/coreos/dex/pkg/log"
-	ptime "github.com/coreos/dex/pkg/time"
 	"github.com/coreos/dex/repo"
 )
 
@@ -73,16 +71,6 @@ func NewConnection(cfg Config) (*gorp.DbMap, error) {
 		}
 	}
 
-	var sleep time.Duration
-	for {
-		if err = dbm.CreateTablesIfNotExists(); err == nil {
-			break
-		}
-		sleep = ptime.ExpBackoff(sleep, time.Minute)
-		log.Errorf("Unable to initialize database, retrying in %v: %v", sleep, err)
-		time.Sleep(sleep)
-	}
-
 	return &dbm, nil
 }
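(Aside for readers: the retry loop in cmd/dex-overlord/main.go relies on ptime.ExpBackoff from dex's pkg/time, which this series does not show. The sketch below is an illustrative stand-in assuming doubling-with-a-ceiling semantics; the one-second base interval is an assumption, not dex's actual value.)

package main

import (
	"fmt"
	"time"
)

// expBackoff is an illustrative stand-in for ptime.ExpBackoff: the first
// failure waits an assumed one-second base, each later failure doubles the
// wait, and max caps the growth.
func expBackoff(prev, max time.Duration) time.Duration {
	if prev == 0 {
		return time.Second // assumed base interval
	}
	if next := 2 * prev; next < max {
		return next
	}
	return max
}

func main() {
	var d time.Duration
	for i := 0; i < 8; i++ {
		d = expBackoff(d, time.Minute)
		fmt.Println(d) // 1s 2s 4s 8s 16s 32s 1m0s 1m0s
	}
}

With time.Minute as the cap, the overlord would wait 1s, 2s, 4s, and so on between attempts, leveling off at one minute.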