Merge pull request #67 from bobbyrullo/db_migrate

DB Migrations for Dex
bobbyrullo 2015-08-20 11:45:35 -07:00
commit 6798adc607
56 changed files with 8758 additions and 20 deletions

Godeps/Godeps.json

@@ -1,5 +1,5 @@
 {
-	"ImportPath": "github.com/coreos-inc/auth",
+	"ImportPath": "github.com/coreos/dex",
 	"GoVersion": "go1.4.2",
 	"Packages": [
 		"./..."
@@ -96,6 +96,10 @@
 		"ImportPath": "github.com/mbanzon/simplehttp",
 		"Rev": "04c542e7ac706a25820090f274ea6a4f39a63326"
 	},
+	{
+		"ImportPath": "github.com/rubenv/sql-migrate",
+		"Rev": "53184e1edfb4f9655b0fa8dd2c23e7763f452bda"
+	},
 	{
 		"ImportPath": "golang.org/x/crypto/bcrypt",
 		"Rev": "1fbbd62cfec66bd39d91e97749579579d4d3037e"
@@ -115,6 +119,11 @@
 	{
 		"ImportPath": "google.golang.org/api/googleapi",
 		"Rev": "d3edb0282bde692467788c50070a9211afe75cf3"
+	},
+	{
+		"ImportPath": "gopkg.in/gorp.v1",
+		"Comment": "v1.7.1",
+		"Rev": "c87af80f3cc5036b55b83d77171e156791085e2e"
 	}
 ]
 }


@@ -0,0 +1,5 @@
.*.swp
*.test
/sql-migrate/test.db
/test.db


@@ -0,0 +1,25 @@
language: go
go:
- 1.2
- 1.3
- 1.4
- tip
services:
- mysql
before_install:
- mysql -e "CREATE DATABASE IF NOT EXISTS test;" -uroot
- psql -c "CREATE DATABASE test;" -U postgres
install:
- go get -t ./...
- go install ./...
script:
- go test -v ./...
- bash test-integration/postgres.sh
- bash test-integration/mysql.sh
- bash test-integration/mysql-flag.sh
- bash test-integration/sqlite.sh


@@ -0,0 +1,245 @@
# sql-migrate
> SQL Schema migration tool for [Go](http://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose).
[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.png)](https://godoc.org/github.com/rubenv/sql-migrate)
Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate).
## Features
* Usable as a CLI tool or as a library
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
* Can embed migrations into your application
* Migrations are defined with SQL for full flexibility
* Atomic migrations
* Up/down migrations to allow rollback
* Supports multiple database types in one project
## Installation
To install the library and command line program, use the following:
```bash
go get github.com/rubenv/sql-migrate/...
```
## Usage
### As a standalone tool
```
$ sql-migrate --help
usage: sql-migrate [--version] [--help] <command> [<args>]
Available commands are:
down Undo a database migration
redo Reapply the last migration
status Show migration status
up Migrates the database to the most recent version available
```
Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
```yml
development:
dialect: sqlite3
datasource: test.db
dir: migrations/sqlite3
production:
dialect: postgres
datasource: dbname=myapp sslmode=disable
dir: migrations/postgres
table: migrations
```
The `table` setting is optional and will default to `gorp_migrations`.
The environment that will be used can be specified with the `-env` flag (defaults to `development`).
Use the `--help` flag in combination with any of the commands to get an overview of its usage:
```
$ sql-migrate up --help
Usage: sql-migrate up [options] ...
Migrates the database to the most recent version available.
Options:
  -config=dbconfig.yml   Configuration file to use.
-env="development" Environment.
-limit=0 Limit the number of migrations (0 = unlimited).
-dryrun Don't apply migrations, just print them.
```
The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
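For example, to preview and then undo the two most recent migrations in the `production` environment defined above:
```bash
sql-migrate down -env=production -limit=2 -dryrun
sql-migrate down -env=production -limit=2
```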
The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
Use the `status` command to see the state of the applied migrations:
```bash
$ sql-migrate status
+---------------+-----------------------------------------+
| MIGRATION | APPLIED |
+---------------+-----------------------------------------+
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
| 2_record.sql | no |
+---------------+-----------------------------------------+
```
### As a library
Import sql-migrate into your application:
```go
import "github.com/rubenv/sql-migrate"
```
Set up a source of migrations; this can come from memory, from a set of files, or from bindata (more on that later):
```go
// Hardcoded strings in memory:
migrations := &migrate.MemoryMigrationSource{
Migrations: []*migrate.Migration{
&migrate.Migration{
Id: "123",
Up: []string{"CREATE TABLE people (id int)"},
Down: []string{"DROP TABLE people"},
},
},
}
// OR: Read migrations from a folder:
migrations := &migrate.FileMigrationSource{
Dir: "db/migrations",
}
// OR: Use migrations from bindata:
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "migrations",
}
```
Then use the `Exec` function to upgrade your database:
```go
db, err := sql.Open("sqlite3", filename)
if err != nil {
// Handle errors!
}
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
if err != nil {
// Handle errors!
}
fmt.Printf("Applied %d migrations!\n", n)
```
Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
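A minimal sketch of acting on that, assuming you simply want to log how far the run got (uses the standard `log` package):
```go
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
if err != nil {
	// The first n migrations succeeded and stay applied;
	// only the failing one was rolled back.
	log.Printf("applied %d migrations before failing: %v", n, err)
}
```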
Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation.
## Writing migrations
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
```sql
-- +migrate Up
-- SQL in section 'Up' is executed when this migration is applied
CREATE TABLE people (id int);
-- +migrate Down
-- SQL section 'Down' is executed when this migration is rolled back
DROP TABLE people;
```
You can put multiple statements in each block, as long as you end them with a semicolon (`;`).
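For example, a single `Up` block may create a table and an index (hypothetical names):
```sql
-- +migrate Up
CREATE TABLE pets (id int);
CREATE INDEX pets_id_idx ON pets (id);

-- +migrate Down
DROP TABLE pets;
```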
If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries:
```sql
-- +migrate Up
CREATE TABLE people (id int);
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION do_something()
returns void AS $$
DECLARE
create_query text;
BEGIN
-- Do something here
END;
$$
language plpgsql;
-- +migrate StatementEnd
-- +migrate Down
DROP FUNCTION do_something();
DROP TABLE people;
```
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
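A hypothetical migration folder might therefore look like this (numeric prefixes are compared numerically, so `10` sorts after `2`):
```
db/migrations/
    1_initial.sql
    2_record.sql
    10_add_index.sql
```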
## Embedding migrations with [bindata](https://github.com/jteeuwen/go-bindata)
If you like your Go applications self-contained (that is: a single binary): use [bindata](https://github.com/jteeuwen/go-bindata) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.
Then use bindata to generate a `.go` file with the migrations embedded:
```bash
go-bindata -pkg myapp -o bindata.go db/migrations/
```
The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives).
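If your toolchain already ships `go generate`, one way to wire this up is a directive (shown here as a sketch) next to the code that uses the migrations:
```go
//go:generate go-bindata -pkg myapp -o bindata.go db/migrations/
```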
Use the `AssetMigrationSource` in your application to find the migrations:
```go
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "db/migrations",
}
```
Both `Asset` and `AssetDir` are functions provided by bindata.
Then proceed as usual.
## Extending
Adding a new migration source means implementing `MigrationSource`.
```go
type MigrationSource interface {
FindMigrations() ([]*Migration, error)
}
```
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
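A minimal sketch of a custom source, assuming `sort` and the `migrate` package are imported; it serves a fixed slice and is essentially what the built-in `MemoryMigrationSource` already does:
```go
// SliceMigrationSource is a hypothetical source serving a fixed slice.
type SliceMigrationSource struct {
	Migrations []*migrate.Migration
}

// byId implements sort.Interface; plain lexicographic order is used here
// for brevity (the built-in sources also order numeric prefixes numerically).
type byId []*migrate.Migration

func (b byId) Len() int           { return len(b) }
func (b byId) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byId) Less(i, j int) bool { return b[i].Id < b[j].Id }

func (s SliceMigrationSource) FindMigrations() ([]*migrate.Migration, error) {
	sort.Sort(byId(s.Migrations))
	return s.Migrations, nil
}
```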
## License
(The MIT License)
Copyright (C) 2014-2015 by Ruben Vermeersch <ruben@rocketeer.be>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,136 @@
package migrate
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"strings"
)
func bindata_read(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
return buf.Bytes(), nil
}
func test_migrations_1_initial_sql() ([]byte, error) {
return bindata_read([]byte{
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x00, 0xff, 0x8c, 0xcd,
0x3d, 0x0e, 0x82, 0x40, 0x10, 0x05, 0xe0, 0x7e, 0x4e, 0xf1, 0x3a, 0x34,
0x86, 0x13, 0x50, 0xa1, 0xd0, 0x91, 0xa8, 0x08, 0x07, 0x40, 0x76, 0x22,
0x13, 0xd7, 0xdd, 0x09, 0xac, 0xc1, 0xe3, 0xbb, 0xc4, 0x68, 0xb4, 0xb3,
0x7c, 0x6f, 0x7e, 0xbe, 0x34, 0xc5, 0xe6, 0x26, 0x97, 0xb1, 0x0b, 0x8c,
0x56, 0x29, 0xc6, 0xd3, 0xb1, 0x82, 0x38, 0x4c, 0xdc, 0x07, 0xf1, 0x0e,
0x49, 0xab, 0x09, 0x64, 0x02, 0x3f, 0xb8, 0xbf, 0x07, 0x36, 0x98, 0x07,
0x76, 0x08, 0x43, 0xac, 0x5e, 0x77, 0xcb, 0x52, 0x0c, 0x9d, 0xaa, 0x15,
0x36, 0xb4, 0xab, 0xcb, 0xbc, 0x29, 0xd1, 0xe4, 0xdb, 0xaa, 0x84, 0xb2,
0x57, 0xcb, 0x58, 0x89, 0x89, 0x2f, 0xc3, 0x3a, 0x23, 0xa2, 0x6f, 0xb0,
0xf0, 0xb3, 0x7b, 0x93, 0x1f, 0x6f, 0x29, 0xff, 0x12, 0x47, 0x6f, 0x6d,
0x9c, 0x9e, 0xbb, 0xfe, 0x4a, 0x45, 0xbd, 0x3f, 0xfc, 0x98, 0x19, 0x3d,
0x03, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x70, 0x5e, 0xf9, 0xda, 0x00, 0x00,
0x00,
},
"test-migrations/1_initial.sql",
)
}
func test_migrations_2_record_sql() ([]byte, error) {
return bindata_read([]byte{
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x00, 0xff, 0xd2, 0xd5,
0x55, 0xd0, 0xce, 0xcd, 0x4c, 0x2f, 0x4a, 0x2c, 0x49, 0x55, 0x08, 0x2d,
0xe0, 0xf2, 0xf4, 0x0b, 0x76, 0x0d, 0x0a, 0x51, 0xf0, 0xf4, 0x0b, 0xf1,
0x57, 0x28, 0x48, 0xcd, 0x2f, 0xc8, 0x49, 0x55, 0xd0, 0xc8, 0x4c, 0xd1,
0x54, 0x08, 0x73, 0xf4, 0x09, 0x75, 0x0d, 0x56, 0xd0, 0x30, 0xd4, 0xb4,
0xe6, 0xe2, 0x42, 0xd6, 0xe3, 0x92, 0x5f, 0x9e, 0xc7, 0xe5, 0xe2, 0xea,
0xe3, 0x1a, 0xe2, 0xaa, 0xe0, 0x16, 0xe4, 0xef, 0x0b, 0xd3, 0x15, 0xee,
0xe1, 0x1a, 0xe4, 0xaa, 0x90, 0x99, 0x62, 0x6b, 0x68, 0xcd, 0x05, 0x08,
0x00, 0x00, 0xff, 0xff, 0xf4, 0x3a, 0x7b, 0xae, 0x64, 0x00, 0x00, 0x00,
},
"test-migrations/2_record.sql",
)
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
return f()
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() ([]byte, error){
"test-migrations/1_initial.sql": test_migrations_1_initial_sql,
"test-migrations/2_record.sql": test_migrations_2_record_sql,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for name := range node.Children {
rv = append(rv, name)
}
return rv, nil
}
type _bintree_t struct {
Func func() ([]byte, error)
Children map[string]*_bintree_t
}
var _bintree = &_bintree_t{nil, map[string]*_bintree_t{
"test-migrations": &_bintree_t{nil, map[string]*_bintree_t{
"1_initial.sql": &_bintree_t{test_migrations_1_initial_sql, map[string]*_bintree_t{
}},
"2_record.sql": &_bintree_t{test_migrations_2_record_sql, map[string]*_bintree_t{
}},
}},
}}


@@ -0,0 +1,199 @@
/*
SQL Schema migration tool for Go.
Key features:
* Usable as a CLI tool or as a library
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
* Can embed migrations into your application
* Migrations are defined with SQL for full flexibility
* Atomic migrations
* Up/down migrations to allow rollback
* Supports multiple database types in one project
Installation
To install the library and command line program, use the following:
go get github.com/rubenv/sql-migrate/...
Command-line tool
The main command is called sql-migrate.
$ sql-migrate --help
usage: sql-migrate [--version] [--help] <command> [<args>]
Available commands are:
down Undo a database migration
redo Reapply the last migration
status Show migration status
up Migrates the database to the most recent version available
Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
development:
dialect: sqlite3
datasource: test.db
dir: migrations/sqlite3
production:
dialect: postgres
datasource: dbname=myapp sslmode=disable
dir: migrations/postgres
table: migrations
The `table` setting is optional and will default to `gorp_migrations`.
The environment that will be used can be specified with the -env flag (defaults to development).
Use the --help flag in combination with any of the commands to get an overview of its usage:
$ sql-migrate up --help
Usage: sql-migrate up [options] ...
Migrates the database to the most recent version available.
Options:
  -config=dbconfig.yml   Configuration file to use.
-env="development" Environment.
-limit=0 Limit the number of migrations (0 = unlimited).
-dryrun Don't apply migrations, just print them.
The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
Use the status command to see the state of the applied migrations:
$ sql-migrate status
+---------------+-----------------------------------------+
| MIGRATION | APPLIED |
+---------------+-----------------------------------------+
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
| 2_record.sql | no |
+---------------+-----------------------------------------+
Library
Import sql-migrate into your application:
import "github.com/rubenv/sql-migrate"
Set up a source of migrations; this can come from memory, from a set of files, or from bindata (more on that later):
// Hardcoded strings in memory:
migrations := &migrate.MemoryMigrationSource{
Migrations: []*migrate.Migration{
&migrate.Migration{
Id: "123",
Up: []string{"CREATE TABLE people (id int)"},
Down: []string{"DROP TABLE people"},
},
},
}
// OR: Read migrations from a folder:
migrations := &migrate.FileMigrationSource{
Dir: "db/migrations",
}
// OR: Use migrations from bindata:
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "migrations",
}
Then use the Exec function to upgrade your database:
db, err := sql.Open("sqlite3", filename)
if err != nil {
// Handle errors!
}
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
if err != nil {
// Handle errors!
}
fmt.Printf("Applied %d migrations!\n", n)
Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails.
The full set of capabilities can be found in the API docs below.
Writing migrations
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
-- +migrate Up
-- SQL in section 'Up' is executed when this migration is applied
CREATE TABLE people (id int);
-- +migrate Down
-- SQL section 'Down' is executed when this migration is rolled back
DROP TABLE people;
You can put multiple statements in each block, as long as you end them with a semicolon (;).
If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries:
-- +migrate Up
CREATE TABLE people (id int);
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION do_something()
returns void AS $$
DECLARE
create_query text;
BEGIN
-- Do something here
END;
$$
language plpgsql;
-- +migrate StatementEnd
-- +migrate Down
DROP FUNCTION do_something();
DROP TABLE people;
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
Embedding migrations with bindata
If you like your Go applications self-contained (that is: a single binary): use bindata (https://github.com/jteeuwen/go-bindata) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.
Then use bindata to generate a .go file with the migrations embedded:
go-bindata -pkg myapp -o bindata.go db/migrations/
The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives).
Use the AssetMigrationSource in your application to find the migrations:
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "db/migrations",
}
Both Asset and AssetDir are functions provided by bindata.
Then proceed as usual.
Extending
Adding a new migration source means implementing MigrationSource.
type MigrationSource interface {
FindMigrations() ([]*Migration, error)
}
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field.
*/
package migrate


@@ -0,0 +1,9 @@
package migrate
import (
"testing"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }


@@ -0,0 +1,475 @@
package migrate
import (
"bytes"
"database/sql"
"errors"
"fmt"
"io"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/rubenv/sql-migrate/sqlparse"
"gopkg.in/gorp.v1"
)
type MigrationDirection int
const (
Up MigrationDirection = iota
Down
)
var tableName = "gorp_migrations"
var schemaName = ""
var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`)
// Set the name of the table used to store migration info.
//
// Should be called before any other call such as (Exec, ExecMax, ...).
func SetTable(name string) {
if name != "" {
tableName = name
}
}
// SetSchema sets the name of the schema in which the migration table lives.
func SetSchema(name string) {
if name != "" {
schemaName = name
}
}
func getTableName() string {
t := tableName
if schemaName != "" {
t = fmt.Sprintf("%s.%s", schemaName, t)
}
return t
}
type Migration struct {
Id string
Up []string
Down []string
}
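// Less reports whether m sorts before other: ids with a numeric prefix
// compare numerically and sort before purely textual ids, which compare
// lexicographically.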
func (m Migration) Less(other *Migration) bool {
switch {
case m.isNumeric() && other.isNumeric():
return m.VersionInt() < other.VersionInt()
case m.isNumeric() && !other.isNumeric():
return true
case !m.isNumeric() && other.isNumeric():
return false
default:
return m.Id < other.Id
}
}
func (m Migration) isNumeric() bool {
return len(m.NumberPrefixMatches()) > 0
}
func (m Migration) NumberPrefixMatches() []string {
return numberPrefixRegex.FindStringSubmatch(m.Id)
}
func (m Migration) VersionInt() int64 {
v := m.NumberPrefixMatches()[1]
value, err := strconv.ParseInt(v, 10, 64)
if err != nil {
panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
}
return value
}
type PlannedMigration struct {
*Migration
Queries []string
}
type byId []*Migration
func (b byId) Len() int { return len(b) }
func (b byId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
type MigrationRecord struct {
Id string `db:"id"`
AppliedAt time.Time `db:"applied_at"`
}
var MigrationDialects = map[string]gorp.Dialect{
"sqlite3": gorp.SqliteDialect{},
"postgres": gorp.PostgresDialect{},
"mysql": gorp.MySQLDialect{"InnoDB", "UTF8"},
"mssql": gorp.SqlServerDialect{},
"oci8": gorp.OracleDialect{},
}
type MigrationSource interface {
// Finds the migrations.
//
// The resulting slice of migrations should be sorted by Id.
FindMigrations() ([]*Migration, error)
}
// A hardcoded set of migrations, in-memory.
type MemoryMigrationSource struct {
Migrations []*Migration
}
var _ MigrationSource = (*MemoryMigrationSource)(nil)
func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
// Make sure migrations are sorted
sort.Sort(byId(m.Migrations))
return m.Migrations, nil
}
// A set of migrations loaded from a directory.
type FileMigrationSource struct {
Dir string
}
var _ MigrationSource = (*FileMigrationSource)(nil)
func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
migrations := make([]*Migration, 0)
file, err := os.Open(f.Dir)
if err != nil {
return nil, err
}
files, err := file.Readdir(0)
if err != nil {
return nil, err
}
for _, info := range files {
if strings.HasSuffix(info.Name(), ".sql") {
file, err := os.Open(path.Join(f.Dir, info.Name()))
if err != nil {
return nil, err
}
migration, err := ParseMigration(info.Name(), file)
if err != nil {
return nil, err
}
migrations = append(migrations, migration)
}
}
// Make sure migrations are sorted
sort.Sort(byId(migrations))
return migrations, nil
}
// Migrations from a bindata asset set.
type AssetMigrationSource struct {
	// Asset should return the content of the file at the given path, if it exists.
	Asset func(path string) ([]byte, error)
	// AssetDir should return the list of files below the given path.
	AssetDir func(path string) ([]string, error)
	// Dir is the path within the bindata to use.
	Dir string
}
var _ MigrationSource = (*AssetMigrationSource)(nil)
func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
migrations := make([]*Migration, 0)
files, err := a.AssetDir(a.Dir)
if err != nil {
return nil, err
}
for _, name := range files {
if strings.HasSuffix(name, ".sql") {
file, err := a.Asset(path.Join(a.Dir, name))
if err != nil {
return nil, err
}
migration, err := ParseMigration(name, bytes.NewReader(file))
if err != nil {
return nil, err
}
migrations = append(migrations, migration)
}
}
// Make sure migrations are sorted
sort.Sort(byId(migrations))
return migrations, nil
}
// Migration parsing
func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
m := &Migration{
Id: id,
}
up, err := sqlparse.SplitSQLStatements(r, true)
if err != nil {
return nil, err
}
down, err := sqlparse.SplitSQLStatements(r, false)
if err != nil {
return nil, err
}
m.Up = up
m.Down = down
return m, nil
}
// Execute a set of migrations
//
// Returns the number of applied migrations.
func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) {
return ExecMax(db, dialect, m, dir, 0)
}
// Execute a set of migrations
//
// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec).
//
// Returns the number of applied migrations.
func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) {
migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max)
if err != nil {
return 0, err
}
// Apply migrations
applied := 0
for _, migration := range migrations {
trans, err := dbMap.Begin()
if err != nil {
return applied, err
}
for _, stmt := range migration.Queries {
_, err := trans.Exec(stmt)
if err != nil {
trans.Rollback()
return applied, err
}
}
if dir == Up {
err = trans.Insert(&MigrationRecord{
Id: migration.Id,
AppliedAt: time.Now(),
})
if err != nil {
return applied, err
}
} else if dir == Down {
_, err := trans.Delete(&MigrationRecord{
Id: migration.Id,
})
if err != nil {
return applied, err
}
} else {
panic("Not possible")
}
err = trans.Commit()
if err != nil {
return applied, err
}
applied++
}
return applied, nil
}
// Plan a migration.
func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) {
dbMap, err := getMigrationDbMap(db, dialect)
if err != nil {
return nil, nil, err
}
migrations, err := m.FindMigrations()
if err != nil {
return nil, nil, err
}
var migrationRecords []MigrationRecord
_, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", getTableName()))
if err != nil {
return nil, nil, err
}
// Sort migrations that have been run by Id.
var existingMigrations []*Migration
for _, migrationRecord := range migrationRecords {
existingMigrations = append(existingMigrations, &Migration{
Id: migrationRecord.Id,
})
}
sort.Sort(byId(existingMigrations))
// Get last migration that was run
record := &Migration{}
if len(existingMigrations) > 0 {
record = existingMigrations[len(existingMigrations)-1]
}
result := make([]*PlannedMigration, 0)
// Add missing migrations up to the last run migration.
// This can happen for example when merges happened.
if len(existingMigrations) > 0 {
result = append(result, ToCatchup(migrations, existingMigrations, record)...)
}
// Figure out which migrations to apply
toApply := ToApply(migrations, record.Id, dir)
toApplyCount := len(toApply)
if max > 0 && max < toApplyCount {
toApplyCount = max
}
for _, v := range toApply[0:toApplyCount] {
if dir == Up {
result = append(result, &PlannedMigration{
Migration: v,
Queries: v.Up,
})
} else if dir == Down {
result = append(result, &PlannedMigration{
Migration: v,
Queries: v.Down,
})
}
}
return result, dbMap, nil
}
// Filter a slice of migrations into ones that should be applied.
func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration {
var index = -1
if current != "" {
for index < len(migrations)-1 {
index++
if migrations[index].Id == current {
break
}
}
}
if direction == Up {
return migrations[index+1:]
} else if direction == Down {
if index == -1 {
return []*Migration{}
}
// Add in reverse order
toApply := make([]*Migration, index+1)
for i := 0; i < index+1; i++ {
toApply[index-i] = migrations[i]
}
return toApply
}
panic("Not possible")
}
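// ToCatchup returns the migrations that exist in the source but were never
// applied, even though they sort before the last-run migration (e.g. after
// a merge); they are planned as Up migrations.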
func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration {
missing := make([]*PlannedMigration, 0)
for _, migration := range migrations {
found := false
for _, existing := range existingMigrations {
if existing.Id == migration.Id {
found = true
break
}
}
if !found && migration.Less(lastRun) {
missing = append(missing, &PlannedMigration{Migration: migration, Queries: migration.Up})
}
}
return missing
}
func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) {
dbMap, err := getMigrationDbMap(db, dialect)
if err != nil {
return nil, err
}
var records []*MigrationRecord
query := fmt.Sprintf("SELECT * FROM %s ORDER BY id ASC", getTableName())
_, err = dbMap.Select(&records, query)
if err != nil {
return nil, err
}
return records, nil
}
func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) {
d, ok := MigrationDialects[dialect]
if !ok {
return nil, fmt.Errorf("Unknown dialect: %s", dialect)
}
// When using the mysql driver, make sure that the parseTime option is
// configured, otherwise it won't map time columns to time.Time. See
// https://github.com/rubenv/sql-migrate/issues/2
if dialect == "mysql" {
var out *time.Time
err := db.QueryRow("SELECT NOW()").Scan(&out)
if err != nil {
if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" {
return nil, errors.New(`Cannot parse dates.
Make sure that the parseTime option is supplied to your database connection.
Check https://github.com/go-sql-driver/mysql#parsetime for more info.`)
} else {
return nil, err
}
}
}
// Create migration database map
dbMap := &gorp.DbMap{Db: db, Dialect: d}
dbMap.AddTableWithNameAndSchema(MigrationRecord{}, schemaName, tableName).SetKeys(false, "Id")
//dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds))
err := dbMap.CreateTablesIfNotExists()
if err != nil {
return nil, err
}
return dbMap, nil
}
// TODO: Run migration + record insert in transaction.


@@ -0,0 +1,357 @@
package migrate
import (
"database/sql"
"os"
_ "github.com/mattn/go-sqlite3"
. "gopkg.in/check.v1"
"gopkg.in/gorp.v1"
)
var filename = "/tmp/sql-migrate-sqlite.db"
var sqliteMigrations = []*Migration{
&Migration{
Id: "123",
Up: []string{"CREATE TABLE people (id int)"},
Down: []string{"DROP TABLE people"},
},
&Migration{
Id: "124",
Up: []string{"ALTER TABLE people ADD COLUMN first_name text"},
Down: []string{"SELECT 0"}, // Not really supported
},
}
type SqliteMigrateSuite struct {
Db *sql.DB
DbMap *gorp.DbMap
}
var _ = Suite(&SqliteMigrateSuite{})
func (s *SqliteMigrateSuite) SetUpTest(c *C) {
db, err := sql.Open("sqlite3", filename)
c.Assert(err, IsNil)
s.Db = db
s.DbMap = &gorp.DbMap{Db: db, Dialect: &gorp.SqliteDialect{}}
}
func (s *SqliteMigrateSuite) TearDownTest(c *C) {
err := os.Remove(filename)
c.Assert(err, IsNil)
}
func (s *SqliteMigrateSuite) TestRunMigration(c *C) {
migrations := &MemoryMigrationSource{
Migrations: sqliteMigrations[:1],
}
// Executes one migration
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 1)
// Can use table now
_, err = s.DbMap.Exec("SELECT * FROM people")
c.Assert(err, IsNil)
// Shouldn't apply migration again
n, err = Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *SqliteMigrateSuite) TestMigrateMultiple(c *C) {
migrations := &MemoryMigrationSource{
Migrations: sqliteMigrations[:2],
}
// Executes two migrations
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
// Can use column now
_, err = s.DbMap.Exec("SELECT first_name FROM people")
c.Assert(err, IsNil)
}
func (s *SqliteMigrateSuite) TestMigrateIncremental(c *C) {
migrations := &MemoryMigrationSource{
Migrations: sqliteMigrations[:1],
}
// Executes one migration
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 1)
// Execute a new migration
migrations = &MemoryMigrationSource{
Migrations: sqliteMigrations[:2],
}
n, err = Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 1)
// Can use column now
_, err = s.DbMap.Exec("SELECT first_name FROM people")
c.Assert(err, IsNil)
}
func (s *SqliteMigrateSuite) TestFileMigrate(c *C) {
migrations := &FileMigrationSource{
Dir: "test-migrations",
}
// Executes two migrations
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
// Has data
id, err := s.DbMap.SelectInt("SELECT id FROM people")
c.Assert(err, IsNil)
c.Assert(id, Equals, int64(1))
}
func (s *SqliteMigrateSuite) TestAssetMigrate(c *C) {
migrations := &AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "test-migrations",
}
// Executes two migrations
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
// Has data
id, err := s.DbMap.SelectInt("SELECT id FROM people")
c.Assert(err, IsNil)
c.Assert(id, Equals, int64(1))
}
func (s *SqliteMigrateSuite) TestMigrateMax(c *C) {
migrations := &FileMigrationSource{
Dir: "test-migrations",
}
// Executes one migration
n, err := ExecMax(s.Db, "sqlite3", migrations, Up, 1)
c.Assert(err, IsNil)
c.Assert(n, Equals, 1)
id, err := s.DbMap.SelectInt("SELECT COUNT(*) FROM people")
c.Assert(err, IsNil)
c.Assert(id, Equals, int64(0))
}
func (s *SqliteMigrateSuite) TestMigrateDown(c *C) {
migrations := &FileMigrationSource{
Dir: "test-migrations",
}
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
// Has data
id, err := s.DbMap.SelectInt("SELECT id FROM people")
c.Assert(err, IsNil)
c.Assert(id, Equals, int64(1))
// Undo the last one
n, err = ExecMax(s.Db, "sqlite3", migrations, Down, 1)
c.Assert(err, IsNil)
c.Assert(n, Equals, 1)
// No more data
id, err = s.DbMap.SelectInt("SELECT COUNT(*) FROM people")
c.Assert(err, IsNil)
c.Assert(id, Equals, int64(0))
// Remove the table.
n, err = ExecMax(s.Db, "sqlite3", migrations, Down, 1)
c.Assert(err, IsNil)
c.Assert(n, Equals, 1)
// Cannot query it anymore
_, err = s.DbMap.SelectInt("SELECT COUNT(*) FROM people")
c.Assert(err, Not(IsNil))
// Nothing left to do.
n, err = ExecMax(s.Db, "sqlite3", migrations, Down, 1)
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *SqliteMigrateSuite) TestMigrateDownFull(c *C) {
migrations := &FileMigrationSource{
Dir: "test-migrations",
}
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
// Has data
id, err := s.DbMap.SelectInt("SELECT id FROM people")
c.Assert(err, IsNil)
c.Assert(id, Equals, int64(1))
// Undo the last one
n, err = Exec(s.Db, "sqlite3", migrations, Down)
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
// Cannot query it anymore
_, err = s.DbMap.SelectInt("SELECT COUNT(*) FROM people")
c.Assert(err, Not(IsNil))
// Nothing left to do.
n, err = Exec(s.Db, "sqlite3", migrations, Down)
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *SqliteMigrateSuite) TestMigrateTransaction(c *C) {
migrations := &MemoryMigrationSource{
Migrations: []*Migration{
sqliteMigrations[0],
sqliteMigrations[1],
&Migration{
Id: "125",
Up: []string{"INSERT INTO people (id, first_name) VALUES (1, 'Test')", "SELECT fail"},
Down: []string{}, // Not important here
},
},
}
// Should fail, transaction should roll back the INSERT.
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, Not(IsNil))
c.Assert(n, Equals, 2)
// INSERT should be rolled back
count, err := s.DbMap.SelectInt("SELECT COUNT(*) FROM people")
c.Assert(err, IsNil)
c.Assert(count, Equals, int64(0))
}
func (s *SqliteMigrateSuite) TestPlanMigration(c *C) {
migrations := &MemoryMigrationSource{
Migrations: []*Migration{
&Migration{
Id: "1_create_table.sql",
Up: []string{"CREATE TABLE people (id int)"},
Down: []string{"DROP TABLE people"},
},
&Migration{
Id: "2_alter_table.sql",
Up: []string{"ALTER TABLE people ADD COLUMN first_name text"},
Down: []string{"SELECT 0"}, // Not really supported
},
&Migration{
Id: "10_add_last_name.sql",
Up: []string{"ALTER TABLE people ADD COLUMN last_name text"},
Down: []string{"ALTER TABLE people DROP COLUMN last_name"},
},
},
}
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
migrations.Migrations = append(migrations.Migrations, &Migration{
Id: "11_add_middle_name.sql",
Up: []string{"ALTER TABLE people ADD COLUMN middle_name text"},
Down: []string{"ALTER TABLE people DROP COLUMN middle_name"},
})
plannedMigrations, _, err := PlanMigration(s.Db, "sqlite3", migrations, Up, 0)
c.Assert(err, IsNil)
c.Assert(plannedMigrations, HasLen, 1)
c.Assert(plannedMigrations[0].Migration, Equals, migrations.Migrations[3])
plannedMigrations, _, err = PlanMigration(s.Db, "sqlite3", migrations, Down, 0)
c.Assert(err, IsNil)
c.Assert(plannedMigrations, HasLen, 3)
c.Assert(plannedMigrations[0].Migration, Equals, migrations.Migrations[2])
c.Assert(plannedMigrations[1].Migration, Equals, migrations.Migrations[1])
c.Assert(plannedMigrations[2].Migration, Equals, migrations.Migrations[0])
}
func (s *SqliteMigrateSuite) TestPlanMigrationWithHoles(c *C) {
up := "SELECT 0"
down := "SELECT 1"
migrations := &MemoryMigrationSource{
Migrations: []*Migration{
&Migration{
Id: "1",
Up: []string{up},
Down: []string{down},
},
&Migration{
Id: "3",
Up: []string{up},
Down: []string{down},
},
},
}
n, err := Exec(s.Db, "sqlite3", migrations, Up)
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
migrations.Migrations = append(migrations.Migrations, &Migration{
Id: "2",
Up: []string{up},
Down: []string{down},
})
migrations.Migrations = append(migrations.Migrations, &Migration{
Id: "4",
Up: []string{up},
Down: []string{down},
})
migrations.Migrations = append(migrations.Migrations, &Migration{
Id: "5",
Up: []string{up},
Down: []string{down},
})
// apply all the missing migrations
plannedMigrations, _, err := PlanMigration(s.Db, "sqlite3", migrations, Up, 0)
c.Assert(err, IsNil)
c.Assert(plannedMigrations, HasLen, 3)
c.Assert(plannedMigrations[0].Migration.Id, Equals, "2")
c.Assert(plannedMigrations[0].Queries[0], Equals, up)
c.Assert(plannedMigrations[1].Migration.Id, Equals, "4")
c.Assert(plannedMigrations[1].Queries[0], Equals, up)
c.Assert(plannedMigrations[2].Migration.Id, Equals, "5")
c.Assert(plannedMigrations[2].Queries[0], Equals, up)
// first catch up to the applied state {1,2,3}, then migrate down one step to {1,2}
plannedMigrations, _, err = PlanMigration(s.Db, "sqlite3", migrations, Down, 1)
c.Assert(err, IsNil)
c.Assert(plannedMigrations, HasLen, 2)
c.Assert(plannedMigrations[0].Migration.Id, Equals, "2")
c.Assert(plannedMigrations[0].Queries[0], Equals, up)
c.Assert(plannedMigrations[1].Migration.Id, Equals, "3")
c.Assert(plannedMigrations[1].Queries[0], Equals, down)
// first catch up to the applied state {1,2,3}, then migrate down two steps to {1}
plannedMigrations, _, err = PlanMigration(s.Db, "sqlite3", migrations, Down, 2)
c.Assert(err, IsNil)
c.Assert(plannedMigrations, HasLen, 3)
c.Assert(plannedMigrations[0].Migration.Id, Equals, "2")
c.Assert(plannedMigrations[0].Queries[0], Equals, up)
c.Assert(plannedMigrations[1].Migration.Id, Equals, "3")
c.Assert(plannedMigrations[1].Queries[0], Equals, down)
c.Assert(plannedMigrations[2].Migration.Id, Equals, "2")
c.Assert(plannedMigrations[2].Queries[0], Equals, down)
}


@@ -0,0 +1,34 @@
package migrate
import (
"sort"
. "gopkg.in/check.v1"
)
type SortSuite struct{}
var _ = Suite(&SortSuite{})
func (s *SortSuite) TestSortMigrations(c *C) {
var migrations = byId([]*Migration{
&Migration{Id: "10_abc", Up: nil, Down: nil},
&Migration{Id: "120_cde", Up: nil, Down: nil},
&Migration{Id: "1_abc", Up: nil, Down: nil},
&Migration{Id: "efg", Up: nil, Down: nil},
&Migration{Id: "2_cde", Up: nil, Down: nil},
&Migration{Id: "35_cde", Up: nil, Down: nil},
&Migration{Id: "3_efg", Up: nil, Down: nil},
&Migration{Id: "4_abc", Up: nil, Down: nil},
})
sort.Sort(migrations)
c.Assert(migrations, HasLen, 8)
c.Assert(migrations[0].Id, Equals, "1_abc")
c.Assert(migrations[1].Id, Equals, "2_cde")
c.Assert(migrations[2].Id, Equals, "3_efg")
c.Assert(migrations[3].Id, Equals, "4_abc")
c.Assert(migrations[4].Id, Equals, "10_abc")
c.Assert(migrations[5].Id, Equals, "35_cde")
c.Assert(migrations[6].Id, Equals, "120_cde")
c.Assert(migrations[7].Id, Equals, "efg")
}


@@ -0,0 +1,63 @@
package main
import (
"fmt"
"github.com/rubenv/sql-migrate"
)
func ApplyMigrations(dir migrate.MigrationDirection, dryrun bool, limit int) error {
env, err := GetEnvironment()
if err != nil {
return fmt.Errorf("Could not parse config: %s", err)
}
db, dialect, err := GetConnection(env)
if err != nil {
return err
}
source := migrate.FileMigrationSource{
Dir: env.Dir,
}
if dryrun {
migrations, _, err := migrate.PlanMigration(db, dialect, source, dir, limit)
if err != nil {
return fmt.Errorf("Cannot plan migration: %s", err)
}
for _, m := range migrations {
PrintMigration(m, dir)
}
} else {
n, err := migrate.ExecMax(db, dialect, source, dir, limit)
if err != nil {
return fmt.Errorf("Migration failed: %s", err)
}
if n == 1 {
ui.Output("Applied 1 migration")
} else {
ui.Output(fmt.Sprintf("Applied %d migrations", n))
}
}
return nil
}
func PrintMigration(m *migrate.PlannedMigration, dir migrate.MigrationDirection) {
if dir == migrate.Up {
ui.Output(fmt.Sprintf("==> Would apply migration %s (up)", m.Id))
for _, q := range m.Up {
ui.Output(q)
}
} else if dir == migrate.Down {
ui.Output(fmt.Sprintf("==> Would apply migration %s (down)", m.Id))
for _, q := range m.Down {
ui.Output(q)
}
} else {
panic("Not reached")
}
}


@@ -0,0 +1,55 @@
package main
import (
"flag"
"strings"
"github.com/rubenv/sql-migrate"
)
type DownCommand struct {
}
func (c *DownCommand) Help() string {
helpText := `
Usage: sql-migrate down [options] ...
Undo a database migration.
Options:
-config=dbconfig.yml Configuration file to use.
-env="development" Environment.
-limit=1 Limit the number of migrations (0 = unlimited).
-dryrun Don't apply migrations, just print them.
`
return strings.TrimSpace(helpText)
}
func (c *DownCommand) Synopsis() string {
return "Undo a database migration"
}
func (c *DownCommand) Run(args []string) int {
var limit int
var dryrun bool
cmdFlags := flag.NewFlagSet("down", flag.ContinueOnError)
cmdFlags.Usage = func() { ui.Output(c.Help()) }
cmdFlags.IntVar(&limit, "limit", 1, "Max number of migrations to apply.")
cmdFlags.BoolVar(&dryrun, "dryrun", false, "Don't apply migrations, just print them.")
ConfigFlags(cmdFlags)
if err := cmdFlags.Parse(args); err != nil {
return 1
}
err := ApplyMigrations(migrate.Down, dryrun, limit)
if err != nil {
ui.Error(err.Error())
return 1
}
return 0
}


@@ -0,0 +1,88 @@
package main
import (
"flag"
"fmt"
"strings"
"github.com/rubenv/sql-migrate"
)
type RedoCommand struct {
}
func (c *RedoCommand) Help() string {
helpText := `
Usage: sql-migrate redo [options] ...
Reapply the last migration.
Options:
-config=dbconfig.yml Configuration file to use.
-env="development" Environment.
-dryrun Don't apply migrations, just print them.
`
return strings.TrimSpace(helpText)
}
func (c *RedoCommand) Synopsis() string {
return "Reapply the last migration"
}
func (c *RedoCommand) Run(args []string) int {
var dryrun bool
cmdFlags := flag.NewFlagSet("redo", flag.ContinueOnError)
cmdFlags.Usage = func() { ui.Output(c.Help()) }
cmdFlags.BoolVar(&dryrun, "dryrun", false, "Don't apply migrations, just print them.")
ConfigFlags(cmdFlags)
if err := cmdFlags.Parse(args); err != nil {
return 1
}
env, err := GetEnvironment()
if err != nil {
ui.Error(fmt.Sprintf("Could not parse config: %s", err))
return 1
}
db, dialect, err := GetConnection(env)
if err != nil {
ui.Error(err.Error())
return 1
}
source := migrate.FileMigrationSource{
Dir: env.Dir,
}
	migrations, _, err := migrate.PlanMigration(db, dialect, source, migrate.Down, 1)
	if err != nil {
		ui.Error(fmt.Sprintf("Cannot plan migration: %s", err))
		return 1
	}
	if len(migrations) == 0 {
		ui.Output("Nothing to do!")
		return 0
	}
if dryrun {
PrintMigration(migrations[0], migrate.Down)
PrintMigration(migrations[0], migrate.Up)
} else {
_, err := migrate.ExecMax(db, dialect, source, migrate.Down, 1)
if err != nil {
ui.Error(fmt.Sprintf("Migration (down) failed: %s", err))
return 1
}
_, err = migrate.ExecMax(db, dialect, source, migrate.Up, 1)
if err != nil {
ui.Error(fmt.Sprintf("Migration (up) failed: %s", err))
return 1
}
ui.Output(fmt.Sprintf("Reapplied migration %s.", migrations[0].Id))
}
return 0
}


@@ -0,0 +1,113 @@
package main
import (
"flag"
"fmt"
"os"
"strings"
"time"
"github.com/olekukonko/tablewriter"
"github.com/rubenv/sql-migrate"
)
type StatusCommand struct {
}
func (c *StatusCommand) Help() string {
helpText := `
Usage: sql-migrate status [options] ...
Show migration status.
Options:
-config=dbconfig.yml Configuration file to use.
-env="development" Environment.
`
return strings.TrimSpace(helpText)
}
func (c *StatusCommand) Synopsis() string {
return "Show migration status"
}
func (c *StatusCommand) Run(args []string) int {
cmdFlags := flag.NewFlagSet("status", flag.ContinueOnError)
cmdFlags.Usage = func() { ui.Output(c.Help()) }
ConfigFlags(cmdFlags)
if err := cmdFlags.Parse(args); err != nil {
return 1
}
env, err := GetEnvironment()
if err != nil {
ui.Error(fmt.Sprintf("Could not parse config: %s", err))
return 1
}
db, dialect, err := GetConnection(env)
if err != nil {
ui.Error(err.Error())
return 1
}
source := migrate.FileMigrationSource{
Dir: env.Dir,
}
migrations, err := source.FindMigrations()
if err != nil {
ui.Error(err.Error())
return 1
}
records, err := migrate.GetMigrationRecords(db, dialect)
if err != nil {
ui.Error(err.Error())
return 1
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Migration", "Applied"})
table.SetColWidth(60)
rows := make(map[string]*statusRow)
for _, m := range migrations {
rows[m.Id] = &statusRow{
Id: m.Id,
Migrated: false,
}
}
	for _, r := range records {
		if rows[r.Id] == nil {
			// Applied migration with no matching file on disk; skip it.
			continue
		}
		rows[r.Id].Migrated = true
		rows[r.Id].AppliedAt = r.AppliedAt
	}
for _, m := range migrations {
if rows[m.Id].Migrated {
table.Append([]string{
m.Id,
rows[m.Id].AppliedAt.String(),
})
} else {
table.Append([]string{
m.Id,
"no",
})
}
}
table.Render()
return 0
}
type statusRow struct {
Id string
Migrated bool
AppliedAt time.Time
}


@@ -0,0 +1,55 @@
package main
import (
"flag"
"strings"
"github.com/rubenv/sql-migrate"
)
type UpCommand struct {
}
func (c *UpCommand) Help() string {
helpText := `
Usage: sql-migrate up [options] ...
Migrates the database to the most recent version available.
Options:
-config=dbconfig.yml Configuration file to use.
-env="development" Environment.
-limit=0 Limit the number of migrations (0 = unlimited).
-dryrun Don't apply migrations, just print them.
`
return strings.TrimSpace(helpText)
}
func (c *UpCommand) Synopsis() string {
return "Migrates the database to the most recent version available"
}
func (c *UpCommand) Run(args []string) int {
var limit int
var dryrun bool
cmdFlags := flag.NewFlagSet("up", flag.ContinueOnError)
cmdFlags.Usage = func() { ui.Output(c.Help()) }
cmdFlags.IntVar(&limit, "limit", 0, "Max number of migrations to apply.")
cmdFlags.BoolVar(&dryrun, "dryrun", false, "Don't apply migrations, just print them.")
ConfigFlags(cmdFlags)
if err := cmdFlags.Parse(args); err != nil {
return 1
}
err := ApplyMigrations(migrate.Up, dryrun, limit)
if err != nil {
ui.Error(err.Error())
return 1
}
return 0
}


@@ -0,0 +1,103 @@
package main
import (
"database/sql"
"errors"
"flag"
"fmt"
"io/ioutil"
"github.com/rubenv/sql-migrate"
"gopkg.in/gorp.v1"
"gopkg.in/yaml.v1"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
)
var dialects = map[string]gorp.Dialect{
"sqlite3": gorp.SqliteDialect{},
"postgres": gorp.PostgresDialect{},
"mysql": gorp.MySQLDialect{"InnoDB", "UTF8"},
}
var ConfigFile string
var ConfigEnvironment string
func ConfigFlags(f *flag.FlagSet) {
f.StringVar(&ConfigFile, "config", "dbconfig.yml", "Configuration file to use.")
f.StringVar(&ConfigEnvironment, "env", "development", "Environment to use.")
}
type Environment struct {
Dialect string `yaml:"dialect"`
DataSource string `yaml:"datasource"`
Dir string `yaml:"dir"`
TableName string `yaml:"table"`
SchemaName string `yaml:"schema"`
}
func ReadConfig() (map[string]*Environment, error) {
file, err := ioutil.ReadFile(ConfigFile)
if err != nil {
return nil, err
}
config := make(map[string]*Environment)
err = yaml.Unmarshal(file, config)
if err != nil {
return nil, err
}
return config, nil
}
func GetEnvironment() (*Environment, error) {
config, err := ReadConfig()
if err != nil {
return nil, err
}
env := config[ConfigEnvironment]
if env == nil {
return nil, errors.New("No environment: " + ConfigEnvironment)
}
if env.Dialect == "" {
return nil, errors.New("No dialect specified")
}
if env.DataSource == "" {
return nil, errors.New("No data source specified")
}
if env.Dir == "" {
env.Dir = "migrations"
}
if env.TableName != "" {
migrate.SetTable(env.TableName)
}
if env.SchemaName != "" {
migrate.SetSchema(env.SchemaName)
}
return env, nil
}
func GetConnection(env *Environment) (*sql.DB, string, error) {
db, err := sql.Open(env.Dialect, env.DataSource)
if err != nil {
return nil, "", fmt.Errorf("Cannot connect to database: %s", err)
}
// Make sure we only accept dialects that were compiled in.
_, exists := dialects[env.Dialect]
if !exists {
return nil, "", fmt.Errorf("Unsupported dialect: %s", env.Dialect)
}
return db, env.Dialect, nil
}


@@ -0,0 +1,46 @@
package main
import (
"fmt"
"os"
"github.com/mitchellh/cli"
)
func main() {
os.Exit(realMain())
}
var ui cli.Ui
func realMain() int {
ui = &cli.BasicUi{Writer: os.Stdout}
cli := &cli.CLI{
Args: os.Args[1:],
Commands: map[string]cli.CommandFactory{
"up": func() (cli.Command, error) {
return &UpCommand{}, nil
},
"down": func() (cli.Command, error) {
return &DownCommand{}, nil
},
"redo": func() (cli.Command, error) {
return &RedoCommand{}, nil
},
"status": func() (cli.Command, error) {
return &StatusCommand{}, nil
},
},
HelpFunc: cli.BasicHelpFunc("sql-migrate"),
Version: "1.0.0",
}
exitCode, err := cli.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err.Error())
return 1
}
return exitCode
}


@@ -0,0 +1 @@
package main


@@ -0,0 +1,12 @@
// +build go1.3

package main
import (
_ "github.com/denisenkom/go-mssqldb"
"gopkg.in/gorp.v1"
)
func init() {
dialects["mssql"] = gorp.SqlServerDialect{}
}


@@ -0,0 +1,28 @@
# SQL migration parser
Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser.
## License
(The MIT License)
Copyright (C) 2014 by Ruben Vermeersch <ruben@rocketeer.be>
Copyright (C) 2012-2014 by Liam Staskawicz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,128 @@
package sqlparse
import (
"bufio"
"bytes"
"errors"
"io"
"strings"
)
const sqlCmdPrefix = "-- +migrate "
// endsWithSemicolon reports whether the line ends with a statement-ending
// semicolon, ignoring any trailing double-dash comment.
func endsWithSemicolon(line string) bool {
prev := ""
scanner := bufio.NewScanner(strings.NewReader(line))
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
word := scanner.Text()
if strings.HasPrefix(word, "--") {
break
}
prev = word
}
return strings.HasSuffix(prev, ";")
}
// Split the given sql script into individual statements.
//
// The base case is to simply split on semicolons, as these
// naturally terminate a statement.
//
// However, more complex cases like pl/pgsql can have semicolons
// within a statement. For these cases, we provide the explicit annotations
// 'StatementBegin' and 'StatementEnd' to allow the script to
// tell us to ignore semicolons.
func SplitSQLStatements(r io.ReadSeeker, direction bool) ([]string, error) {
_, err := r.Seek(0, 0)
if err != nil {
return nil, err
}
var buf bytes.Buffer
scanner := bufio.NewScanner(r)
// track the count of each section
// so we can diagnose scripts with no annotations
upSections := 0
downSections := 0
statementEnded := false
ignoreSemicolons := false
directionIsActive := false
stmts := make([]string, 0)
for scanner.Scan() {
line := scanner.Text()
// handle any migrate-specific commands
if strings.HasPrefix(line, sqlCmdPrefix) {
cmd := strings.TrimSpace(line[len(sqlCmdPrefix):])
switch cmd {
case "Up":
directionIsActive = (direction == true)
upSections++
break
case "Down":
directionIsActive = (direction == false)
downSections++
break
case "StatementBegin":
if directionIsActive {
ignoreSemicolons = true
}
break
case "StatementEnd":
if directionIsActive {
statementEnded = (ignoreSemicolons == true)
ignoreSemicolons = false
}
break
}
}
if !directionIsActive {
continue
}
if _, err := buf.WriteString(line + "\n"); err != nil {
return nil, err
}
// Wrap up the two supported cases: 1) basic statements ending with a
// semicolon; 2) psql statement blocks. Lines that end with a semicolon
// but sit inside a statement block do not conclude a statement.
if (!ignoreSemicolons && endsWithSemicolon(line)) || statementEnded {
statementEnded = false
stmts = append(stmts, buf.String())
buf.Reset()
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
// diagnose likely migration script errors
if ignoreSemicolons {
return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'")
}
if upSections == 0 && downSections == 0 {
return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed.
See https://github.com/rubenv/sql-migrate for details.`)
}
return stmts, nil
}


@@ -0,0 +1,151 @@
package sqlparse
import (
"strings"
"testing"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type SqlParseSuite struct {
}
var _ = Suite(&SqlParseSuite{})
func (s *SqlParseSuite) TestSemicolons(c *C) {
type testData struct {
line string
result bool
}
tests := []testData{
{
line: "END;",
result: true,
},
{
line: "END; -- comment",
result: true,
},
{
line: "END ; -- comment",
result: true,
},
{
line: "END -- comment",
result: false,
},
{
line: "END -- comment ;",
result: false,
},
{
line: "END \" ; \" -- comment",
result: false,
},
}
for _, test := range tests {
r := endsWithSemicolon(test.line)
c.Assert(r, Equals, test.result)
}
}
func (s *SqlParseSuite) TestSplitStatements(c *C) {
type testData struct {
sql string
direction bool
count int
}
tests := []testData{
{
sql: functxt,
direction: true,
count: 2,
},
{
sql: functxt,
direction: false,
count: 2,
},
{
sql: multitxt,
direction: true,
count: 2,
},
{
sql: multitxt,
direction: false,
count: 2,
},
}
for _, test := range tests {
stmts, err := SplitSQLStatements(strings.NewReader(test.sql), test.direction)
c.Assert(err, IsNil)
c.Assert(stmts, HasLen, test.count)
}
}
var functxt = `-- +migrate Up
CREATE TABLE IF NOT EXISTS histories (
id BIGSERIAL PRIMARY KEY,
current_value varchar(2000) NOT NULL,
created_at timestamp with time zone NOT NULL
);
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION histories_partition_creation( DATE, DATE )
returns void AS $$
DECLARE
create_query text;
BEGIN
FOR create_query IN SELECT
'CREATE TABLE IF NOT EXISTS histories_'
|| TO_CHAR( d, 'YYYY_MM' )
|| ' ( CHECK( created_at >= timestamp '''
|| TO_CHAR( d, 'YYYY-MM-DD 00:00:00' )
|| ''' AND created_at < timestamp '''
|| TO_CHAR( d + INTERVAL '1 month', 'YYYY-MM-DD 00:00:00' )
|| ''' ) ) inherits ( histories );'
FROM generate_series( $1, $2, '1 month' ) AS d
LOOP
EXECUTE create_query;
END LOOP; -- LOOP END
END; -- FUNCTION END
$$
language plpgsql;
-- +migrate StatementEnd
-- +migrate Down
drop function histories_partition_creation(DATE, DATE);
drop TABLE histories;
`
// test multiple up/down transitions in a single script
var multitxt = `-- +migrate Up
CREATE TABLE post (
id int NOT NULL,
title text,
body text,
PRIMARY KEY(id)
);
-- +migrate Down
DROP TABLE post;
-- +migrate Up
CREATE TABLE fancier_post (
id int NOT NULL,
title text,
body text,
created_on timestamp without time zone,
PRIMARY KEY(id)
);
-- +migrate Down
DROP TABLE fancier_post;
`


@@ -0,0 +1,20 @@
postgres:
dialect: postgres
datasource: dbname=test sslmode=disable
dir: test-migrations
mysql:
dialect: mysql
datasource: root@/test?parseTime=true
dir: test-migrations
mysql_noflag:
dialect: mysql
datasource: root@/test
dir: test-migrations
sqlite:
dialect: sqlite3
datasource: test.db
dir: test-migrations
table: migrations

View file

@ -0,0 +1,10 @@
#!/bin/bash
# Tweak PATH for Travis
export PATH=$PATH:$HOME/gopath/bin
OPTIONS="-config=test-integration/dbconfig.yml -env mysql_noflag"
set -ex
sql-migrate status $OPTIONS | grep -q "Make sure that the parseTime option is supplied"

View file

@ -0,0 +1,14 @@
#!/bin/bash
# Tweak PATH for Travis
export PATH=$PATH:$HOME/gopath/bin
OPTIONS="-config=test-integration/dbconfig.yml -env mysql"
set -ex
sql-migrate status $OPTIONS
sql-migrate up $OPTIONS
sql-migrate down $OPTIONS
sql-migrate redo $OPTIONS
sql-migrate status $OPTIONS

View file

@ -0,0 +1,14 @@
#!/bin/bash
# Tweak PATH for Travis
export PATH=$PATH:$HOME/gopath/bin
OPTIONS="-config=test-integration/dbconfig.yml -env postgres"
set -ex
sql-migrate status $OPTIONS
sql-migrate up $OPTIONS
sql-migrate down $OPTIONS
sql-migrate redo $OPTIONS
sql-migrate status $OPTIONS

View file

@ -0,0 +1,17 @@
#!/bin/bash
# Tweak PATH for Travis
export PATH=$PATH:$HOME/gopath/bin
OPTIONS="-config=test-integration/dbconfig.yml -env sqlite"
set -ex
sql-migrate status $OPTIONS
sql-migrate up $OPTIONS
sql-migrate down $OPTIONS
sql-migrate redo $OPTIONS
sql-migrate status $OPTIONS
# Should have used the custom migrations table
sqlite3 test.db "SELECT COUNT(*) FROM migrations"

View file

@ -0,0 +1,8 @@
-- +migrate Up
-- SQL in section 'Up' is executed when this migration is applied
CREATE TABLE people (id int);
-- +migrate Down
-- SQL section 'Down' is executed when this migration is rolled back
DROP TABLE people;

View file

@ -0,0 +1,5 @@
-- +migrate Up
INSERT INTO people (id) VALUES (1);
-- +migrate Down
DELETE FROM people WHERE id=1;

View file

@ -0,0 +1,101 @@
package migrate
import (
"sort"
. "gopkg.in/check.v1"
)
var toapplyMigrations = []*Migration{
&Migration{Id: "abc", Up: nil, Down: nil},
&Migration{Id: "cde", Up: nil, Down: nil},
&Migration{Id: "efg", Up: nil, Down: nil},
}
type ToApplyMigrateSuite struct {
}
var _ = Suite(&ToApplyMigrateSuite{})
func (s *ToApplyMigrateSuite) TestGetAll(c *C) {
toApply := ToApply(toapplyMigrations, "", Up)
c.Assert(toApply, HasLen, 3)
c.Assert(toApply[0], Equals, toapplyMigrations[0])
c.Assert(toApply[1], Equals, toapplyMigrations[1])
c.Assert(toApply[2], Equals, toapplyMigrations[2])
}
func (s *ToApplyMigrateSuite) TestGetAbc(c *C) {
toApply := ToApply(toapplyMigrations, "abc", Up)
c.Assert(toApply, HasLen, 2)
c.Assert(toApply[0], Equals, toapplyMigrations[1])
c.Assert(toApply[1], Equals, toapplyMigrations[2])
}
func (s *ToApplyMigrateSuite) TestGetCde(c *C) {
toApply := ToApply(toapplyMigrations, "cde", Up)
c.Assert(toApply, HasLen, 1)
c.Assert(toApply[0], Equals, toapplyMigrations[2])
}
func (s *ToApplyMigrateSuite) TestGetDone(c *C) {
toApply := ToApply(toapplyMigrations, "efg", Up)
c.Assert(toApply, HasLen, 0)
toApply = ToApply(toapplyMigrations, "zzz", Up)
c.Assert(toApply, HasLen, 0)
}
func (s *ToApplyMigrateSuite) TestDownDone(c *C) {
toApply := ToApply(toapplyMigrations, "", Down)
c.Assert(toApply, HasLen, 0)
}
func (s *ToApplyMigrateSuite) TestDownCde(c *C) {
toApply := ToApply(toapplyMigrations, "cde", Down)
c.Assert(toApply, HasLen, 2)
c.Assert(toApply[0], Equals, toapplyMigrations[1])
c.Assert(toApply[1], Equals, toapplyMigrations[0])
}
func (s *ToApplyMigrateSuite) TestDownAbc(c *C) {
toApply := ToApply(toapplyMigrations, "abc", Down)
c.Assert(toApply, HasLen, 1)
c.Assert(toApply[0], Equals, toapplyMigrations[0])
}
func (s *ToApplyMigrateSuite) TestDownAll(c *C) {
toApply := ToApply(toapplyMigrations, "efg", Down)
c.Assert(toApply, HasLen, 3)
c.Assert(toApply[0], Equals, toapplyMigrations[2])
c.Assert(toApply[1], Equals, toapplyMigrations[1])
c.Assert(toApply[2], Equals, toapplyMigrations[0])
toApply = ToApply(toapplyMigrations, "zzz", Down)
c.Assert(toApply, HasLen, 3)
c.Assert(toApply[0], Equals, toapplyMigrations[2])
c.Assert(toApply[1], Equals, toapplyMigrations[1])
c.Assert(toApply[2], Equals, toapplyMigrations[0])
}
func (s *ToApplyMigrateSuite) TestAlphaNumericMigrations(c *C) {
var migrations = byId([]*Migration{
&Migration{Id: "10_abc", Up: nil, Down: nil},
&Migration{Id: "1_abc", Up: nil, Down: nil},
&Migration{Id: "efg", Up: nil, Down: nil},
&Migration{Id: "2_cde", Up: nil, Down: nil},
&Migration{Id: "35_cde", Up: nil, Down: nil},
})
sort.Sort(migrations)
toApplyUp := ToApply(migrations, "2_cde", Up)
c.Assert(toApplyUp, HasLen, 3)
c.Assert(toApplyUp[0].Id, Equals, "10_abc")
c.Assert(toApplyUp[1].Id, Equals, "35_cde")
c.Assert(toApplyUp[2].Id, Equals, "efg")
toApplyDown := ToApply(migrations, "2_cde", Down)
c.Assert(toApplyDown, HasLen, 2)
c.Assert(toApplyDown[0].Id, Equals, "2_cde")
c.Assert(toApplyDown[1].Id, Equals, "1_abc")
}

8
Godeps/_workspace/src/gopkg.in/gorp.v1/.gitignore generated vendored Normal file
View file

@ -0,0 +1,8 @@
_test
_testmain.go
_obj
*~
*.6
6.out
gorptest.bin
tmp

21
Godeps/_workspace/src/gopkg.in/gorp.v1/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,21 @@
language: go
go:
- 1.1
- tip
services:
- mysql
- postgres
- sqlite3
before_script:
- mysql -e "CREATE DATABASE gorptest;"
- mysql -u root -e "GRANT ALL ON gorptest.* TO gorptest@localhost IDENTIFIED BY 'gorptest'"
- psql -c "CREATE DATABASE gorptest;" -U postgres
- psql -c "CREATE USER "gorptest" WITH SUPERUSER PASSWORD 'gorptest';" -U postgres
- go get github.com/lib/pq
- go get github.com/mattn/go-sqlite3
- go get github.com/ziutek/mymysql/godrv
- go get github.com/go-sql-driver/mysql
script: ./test_all.sh

22
Godeps/_workspace/src/gopkg.in/gorp.v1/LICENSE generated vendored Normal file
View file

@ -0,0 +1,22 @@
(The MIT License)
Copyright (c) 2012 James Cooper <james@bitmechanic.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

6
Godeps/_workspace/src/gopkg.in/gorp.v1/Makefile generated vendored Normal file
View file

@ -0,0 +1,6 @@
include $(GOROOT)/src/Make.inc
TARG = github.com/coopernurse/gorp
GOFILES = gorp.go dialect.go
include $(GOROOT)/src/Make.pkg

672
Godeps/_workspace/src/gopkg.in/gorp.v1/README.md generated vendored Normal file
View file

@ -0,0 +1,672 @@
# Go Relational Persistence
[![build status](https://secure.travis-ci.org/go-gorp/gorp.png)](http://travis-ci.org/go-gorp/gorp)
I hesitate to call gorp an ORM. Go doesn't really have objects, at least
not in the classic Smalltalk/Java sense. There goes the "O". gorp doesn't
know anything about the relationships between your structs (at least not
yet). So the "R" is questionable too (but I use it in the name because,
well, it seemed more clever).
The "M" is alive and well. Given some Go structs and a database, gorp
should remove a fair amount of boilerplate busy-work from your code.
I hope that gorp saves you time, minimizes the drudgery of getting data
in and out of your database, and helps your code focus on algorithms,
not infrastructure.
* Bind struct fields to table columns via API or tag
* Support for embedded structs
* Support for transactions
* Forward engineer db schema from structs (great for unit tests)
* Pre/post insert/update/delete hooks
* Automatically generate insert/update/delete statements for a struct
* Automatic binding of auto increment PKs back to struct after insert
* Delete by primary key(s)
* Select by primary key(s)
* Optional trace sql logging
* Bind arbitrary SQL queries to a struct
* Bind slice to SELECT query results without type assertions
* Use positional or named bind parameters in custom SELECT queries
* Optional optimistic locking using a version column (for update/deletes)
## Installation
# install the library:
go get gopkg.in/gorp.v1
// use in your .go code:
import (
"gopkg.in/gorp.v1"
)
## Versioning
This project provides a stable release (v1.x tags) and a bleeding edge codebase (master).
`gopkg.in/gorp.v1` points to the latest v1.x tag. The APIs for v1 are stable and shouldn't change. Development takes place on the master branch. Although the code in master should always compile and test successfully, it might break APIs. We aim to maintain backwards compatibility, but APIs and behaviour might be changed to fix a bug. Also note that APIs that are new in the master branch can change until released as v2.
If you want to use bleeding edge, use `github.com/go-gorp/gorp` as import path.
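For clarity, the two import paths side by side (use one or the other, never both):
```go
import (
	// stable v1 API, tracks the latest v1.x tag
	"gopkg.in/gorp.v1"

	// bleeding edge (master); APIs may change
	// "github.com/go-gorp/gorp"
)
```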
## API Documentation
Full godoc output from the latest v1 release is available here:
https://godoc.org/gopkg.in/gorp.v1
For the latest code in master:
https://godoc.org/github.com/go-gorp/gorp
## Quickstart
```go
package main
import (
"database/sql"
"gopkg.in/gorp.v1"
_ "github.com/mattn/go-sqlite3"
"log"
"time"
)
func main() {
// initialize the DbMap
dbmap := initDb()
defer dbmap.Db.Close()
// delete any existing rows
err := dbmap.TruncateTables()
checkErr(err, "TruncateTables failed")
// create two posts
p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum")
p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum")
// insert rows - auto increment PKs will be set properly after the insert
err = dbmap.Insert(&p1, &p2)
checkErr(err, "Insert failed")
// use convenience SelectInt
count, err := dbmap.SelectInt("select count(*) from posts")
checkErr(err, "select count(*) failed")
log.Println("Rows after inserting:", count)
// update a row
p2.Title = "Go 1.2 is better than ever"
count, err = dbmap.Update(&p2)
checkErr(err, "Update failed")
log.Println("Rows updated:", count)
// fetch one row - note use of "post_id" instead of "Id" since column is aliased
//
// Postgres users should use $1 instead of ? placeholders
// See 'Known Issues' below
//
err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id)
checkErr(err, "SelectOne failed")
log.Println("p2 row:", p2)
// fetch all rows
var posts []Post
_, err = dbmap.Select(&posts, "select * from posts order by post_id")
checkErr(err, "Select failed")
log.Println("All rows:")
for x, p := range posts {
log.Printf(" %d: %v\n", x, p)
}
// delete row by PK
count, err = dbmap.Delete(&p1)
checkErr(err, "Delete failed")
log.Println("Rows deleted:", count)
// delete row manually via Exec
_, err = dbmap.Exec("delete from posts where post_id=?", p2.Id)
checkErr(err, "Exec failed")
// confirm count is zero
count, err = dbmap.SelectInt("select count(*) from posts")
checkErr(err, "select count(*) failed")
log.Println("Row count - should be zero:", count)
log.Println("Done!")
}
type Post struct {
// db tag lets you specify the column name if it differs from the struct field
Id int64 `db:"post_id"`
Created int64
Title string
Body string
}
func newPost(title, body string) Post {
return Post{
Created: time.Now().UnixNano(),
Title: title,
Body: body,
}
}
func initDb() *gorp.DbMap {
// connect to db using standard Go database/sql API
// use whatever database/sql driver you wish
db, err := sql.Open("sqlite3", "/tmp/post_db.bin")
checkErr(err, "sql.Open failed")
// construct a gorp DbMap
dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}
// add a table, setting the table name to 'posts' and
// specifying that the Id property is an auto incrementing PK
dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id")
// create the table. in a production system you'd generally
// use a migration tool, or create the tables via scripts
err = dbmap.CreateTablesIfNotExists()
checkErr(err, "Create tables failed")
return dbmap
}
func checkErr(err error, msg string) {
if err != nil {
log.Fatalln(msg, err)
}
}
```
## Examples
### Mapping structs to tables
First define some types:
```go
type Invoice struct {
Id int64
Created int64
Updated int64
Memo string
PersonId int64
}
type Person struct {
Id int64
Created int64
Updated int64
FName string
LName string
}
// Example of using tags to alias fields to column names
// The 'db' value is the column name
//
// A hyphen will cause gorp to skip this field, similar to the
// Go json package.
//
// This is equivalent to using the ColMap methods:
//
// table := dbmap.AddTableWithName(Product{}, "product")
// table.ColMap("Id").Rename("product_id")
// table.ColMap("Price").Rename("unit_price")
// table.ColMap("IgnoreMe").SetTransient(true)
//
type Product struct {
Id int64 `db:"product_id"`
Price int64 `db:"unit_price"`
IgnoreMe string `db:"-"`
}
```
Then create a mapper; typically you'd do this once, at app startup:
```go
// connect to db using standard Go database/sql API
// use whatever database/sql driver you wish
db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword")
// construct a gorp DbMap
dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}}
// register the structs you wish to use with gorp
// you can also use the shorter dbmap.AddTable() if you
// don't want to override the table name
//
// SetKeys(true) means we have an auto-increment primary key, which
// will get automatically bound to your struct post-insert
//
t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id")
t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id")
t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id")
```
### Struct Embedding
gorp supports embedding structs. For example:
```go
type Names struct {
FirstName string
LastName string
}
type WithEmbeddedStruct struct {
Id int64
Names
}
es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}}
err := dbmap.Insert(es)
```
See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example.
### Create/Drop Tables ###
Automatically create / drop registered tables. This is useful for unit tests
but is entirely optional. You can of course use gorp with tables created manually,
or with a separate migration tool (like goose: https://bitbucket.org/liamstask/goose).
```go
// create all registered tables
dbmap.CreateTables()
// same as above, but uses "if not exists" clause to skip tables that are
// already defined
dbmap.CreateTablesIfNotExists()
// drop
dbmap.DropTables()
```
### SQL Logging
Optionally you can pass in a logger to trace all SQL statements.
I recommend enabling this initially while you're getting the feel for what
gorp is doing on your behalf.
Gorp defines a `GorpLogger` interface that Go's built in `log.Logger` satisfies.
However, you can write your own `GorpLogger` implementation, or use a package such
as `glog` if you want more control over how statements are logged.
```go
// Will log all SQL statements + args as they are run
// The first arg is a string prefix to prepend to all log messages
dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds))
// Turn off tracing
dbmap.TraceOff()
```
### Insert
```go
// Must declare as pointers so optional callback hooks
// can operate on your data, not copies
inv1 := &Invoice{0, 100, 200, "first order", 0}
inv2 := &Invoice{0, 100, 200, "second order", 0}
// Insert your rows
err := dbmap.Insert(inv1, inv2)
// Because we called SetKeys(true) on Invoice, the Id field
// will be populated after the Insert() automatically
fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id)
```
### Update
Continuing the above example, use the `Update` method to modify an Invoice:
```go
// count is the # of rows updated, which should be 1 in this example
count, err := dbmap.Update(inv1)
```
### Delete
If you have primary key(s) defined for a struct, you can use the `Delete`
method to remove rows:
```go
count, err := dbmap.Delete(inv1)
```
### Select by Key
Use the `Get` method to fetch a single row by primary key. It returns
nil if no row is found.
```go
// fetch Invoice with Id=99
obj, err := dbmap.Get(Invoice{}, 99)
inv := obj.(*Invoice)
```
### Ad Hoc SQL
#### SELECT
`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice
or a single struct.
```go
// Select a slice - first return value is not needed when a slice pointer is passed to Select()
var posts []Post
_, err := dbmap.Select(&posts, "select * from post order by id")
// You can also use primitive types
var ids []string
_, err := dbmap.Select(&ids, "select id from post")
// Select a single row.
// Returns an error if no row found, or if more than one row is found
var post Post
err := dbmap.SelectOne(&post, "select * from post where id=?", id)
```
Want to do joins? Just write the SQL and the struct. gorp will bind them:
```go
// Define a type for your join
// It *must* contain all the columns in your SELECT statement
//
// The names here should match the aliased column names you specify
// in your SQL - no additional binding work required. Simple.
//
type InvoicePersonView struct {
InvoiceId int64
PersonId int64
Memo string
FName string
}
// Create some rows
p1 := &Person{0, 0, 0, "bob", "smith"}
dbmap.Insert(p1)
// notice how we can wire up p1.Id to the invoice easily
inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id}
dbmap.Insert(inv1)
// Run your query
query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " +
"from invoice_test i, person_test p " +
"where i.PersonId = p.Id"
// pass a slice to Select()
var list []InvoicePersonView
_, err := dbmap.Select(&list, query)
// this should test true
expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName}
if reflect.DeepEqual(list[0], expected) {
fmt.Println("Woot! My join worked!")
}
```
#### SELECT string or int64
gorp provides a few convenience methods for selecting a single string or int64.
```go
// select single int64 from db (use $1 instead of ? for postgresql)
i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal)
// select single string from db:
s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal)
```
#### Named bind parameters
You may use a map or struct to bind parameters by name. This is currently
only supported in SELECT queries.
```go
_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{
"name": "Rob",
"age": 31,
})
```
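The struct form works the same way; a sketch under the assumption that each `:param` matches an exported struct field name (`personFilter` here is illustrative):
```go
type personFilter struct {
	Name string
	Age  int
}

_, err := dbm.Select(&dest, "select * from Foo where name = :Name and age = :Age",
	personFilter{Name: "Rob", Age: 31})
```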
#### UPDATE / DELETE
You can execute raw SQL if you wish. Particularly good for batch operations.
```go
res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10)
```
### Transactions
You can batch operations into a transaction:
```go
func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error {
// Start a new transaction
trans, err := dbmap.Begin()
if err != nil {
return err
}
trans.Insert(per)
inv.PersonId = per.Id
trans.Insert(inv)
// if the commit is successful, a nil error is returned
return trans.Commit()
}
```
### Hooks
Use hooks to update data before/after saving to the db. Good for timestamps:
```go
// implement the PreInsert and PreUpdate hooks
func (i *Invoice) PreInsert(s gorp.SqlExecutor) error {
i.Created = time.Now().UnixNano()
i.Updated = i.Created
return nil
}
func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error {
i.Updated = time.Now().UnixNano()
return nil
}
// You can use the SqlExecutor to cascade additional SQL
// Take care to avoid cycles. gorp won't prevent them.
//
// Here's an example of a cascading delete
//
func (p *Person) PreDelete(s gorp.SqlExecutor) error {
query := "delete from invoice_test where PersonId=?"
err := s.Exec(query, p.Id); if err != nil {
return err
}
return nil
}
```
Full list of hooks that you can implement:
PostGet
PreInsert
PostInsert
PreUpdate
PostUpdate
PreDelete
PostDelete
All have the same signature. For example:
func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error
### Optimistic Locking
gorp provides a simple optimistic locking feature, similar to Java's JPA, that
will raise an error if you try to update/delete a row whose `version` column
has a value different than the one in memory. This provides a safe way to do
"select then update" style operations without explicit read and write locks.
```go
// Version is an auto-incremented number, managed by gorp
// If this property is present on your struct, update
// operations will be constrained
//
// For example, say we defined Person as:
type Person struct {
Id int64
Created int64
Updated int64
FName string
LName string
// automatically used as the Version col
// use table.SetVersionCol("columnName") to map a different
// struct field as the version field
Version int64
}
p1 := &Person{0, 0, 0, "Bob", "Smith", 0}
dbmap.Insert(p1) // Version is now 1
obj, err := dbmap.Get(Person{}, p1.Id)
p2 := obj.(*Person)
p2.LName = "Edwards"
dbmap.Update(p2) // Version is now 2
p1.LName = "Howard"
// Raises error because p1.Version == 1, which is out of date
count, err := dbmap.Update(p1)
_, ok := err.(gorp.OptimisticLockError)
if ok {
// should reach this statement
// in a real app you might reload the row and retry, or
// you might propagate this to the user, depending on the desired
// semantics
fmt.Printf("Tried to update row with stale data: %v\n", err)
} else {
// some other db error occurred - log or return up the stack
fmt.Printf("Unknown db err: %v\n", err)
}
```
## Database Drivers
gorp uses the Go 1 `database/sql` package. A full list of compliant drivers is available here:
http://code.google.com/p/go-wiki/wiki/SQLDrivers
Sadly, SQL databases differ on various issues. gorp provides a Dialect interface that should be
implemented per database vendor. Dialects are provided for:
* MySQL
* PostgreSQL
* sqlite3
Each of these three databases pass the test suite. See `gorp_test.go` for example
DSNs for these three databases.
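As a quick reference, a minimal sketch of wiring each of these dialects into a `DbMap` (the `db` handle comes from `sql.Open` as in the Quickstart; pick the one matching your driver):
```go
// sqlite3
sqliteMap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}

// PostgreSQL
pgMap := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}

// MySQL needs a storage engine and character encoding
mysqlMap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"}}
```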
Support is also provided for:
* Oracle (contributed by @klaidliadon)
* SQL Server (contributed by @qrawl) - use driver: github.com/denisenkom/go-mssqldb
Note that these databases are not covered by CI and I (@coopernurse) have no good way to
test them locally. So please try them and send patches as needed, but expect a bit more
unpredictability.
## Known Issues
### SQL placeholder portability
Different databases use different strings to indicate variable placeholders in
prepared SQL statements. Unlike some database abstraction layers (such as JDBC),
Go's `database/sql` does not standardize this.
SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` methods delegates
to a Dialect implementation for each database, and will generate portable SQL.
Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, etc will not be
parsed. Consequently you may have portability issues if you write a query like this:
```go
// works on MySQL and Sqlite3, but not with Postgresql
err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30)
```
In `Select` and `SelectOne` you can use named parameters to work around this.
The following is portable:
```go
err := dbmap.SelectOne(&val, "select * from foo where id = :id",
map[string]interface{} { "id": 30})
```
### time.Time and time zones
gorp will pass `time.Time` fields through to the `database/sql` driver, but note that
the behavior of this type varies across database drivers.
MySQL users should be especially cautious. See: https://github.com/ziutek/mymysql/pull/77
To avoid any potential issues with timezone/DST, consider using an integer field for time
data and storing UNIX time.
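A minimal sketch of that approach, reusing the hook mechanism shown above (the `Event` type is illustrative):
```go
type Event struct {
	Id      int64
	Created int64 // UNIX seconds, UTC; avoids driver-specific time.Time handling
}

func (e *Event) PreInsert(s gorp.SqlExecutor) error {
	e.Created = time.Now().Unix()
	return nil
}

// When reading it back:
// t := time.Unix(e.Created, 0).UTC()
```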
## Running the tests
The included tests may be run against MySQL, Postgresql, or sqlite3.
You must set two environment variables so the test code knows which driver to
use, and how to connect to your database.
```sh
# MySQL example:
export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123
export GORP_TEST_DIALECT=mysql
# run the tests
go test
# run the tests and benchmarks
go test -bench="Bench" -benchtime 10
```
Valid `GORP_TEST_DIALECT` values are: "mysql", "postgres", "sqlite3"
See the `test_all.sh` script for examples of all 3 databases. This is the script I run
locally to test the library.
## Performance
gorp uses reflection to construct SQL queries and bind parameters. See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a simple perf test. On my MacBook Pro gorp is about 2-3% slower than hand written SQL.
## Help/Support
IRC: #gorp
Mailing list: gorp-dev@googlegroups.com
Bugs/Enhancements: Create a github issue
## Pull requests / Contributions
Contributions are very welcome. Please follow these guidelines:
* Fork the `master` branch and issue pull requests targeting the `master` branch
* If you are adding an enhancement, please open an issue first with your proposed change.
* Changes that break backwards compatibility in the public API are only accepted after we
discuss on a GitHub issue for a while.
Thanks!
## Contributors
* matthias-margush - column aliasing via tags
* Rob Figueiredo - @robfig
* Quinn Slack - @sqs

692
Godeps/_workspace/src/gopkg.in/gorp.v1/dialect.go generated vendored Normal file
View file

@ -0,0 +1,692 @@
package gorp
import (
"errors"
"fmt"
"reflect"
"strings"
)
// The Dialect interface encapsulates behaviors that differ across
// SQL databases. At present the Dialect is only used by CreateTables()
// but this could change in the future
type Dialect interface {
// adds a suffix to any query, usually ";"
QuerySuffix() string
// ToSqlType returns the SQL column type to use when creating a
// table of the given Go Type. maxsize can be used to switch based on
// size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB,
// or LONGBLOB depending on the maxsize
ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string
// string to append to primary key column definitions
AutoIncrStr() string
// string to bind autoincrement columns to. Empty string will
// remove reference to those columns in the INSERT statement.
AutoIncrBindValue() string
AutoIncrInsertSuffix(col *ColumnMap) string
// string to append to "create table" statement for vendor specific
// table attributes
CreateTableSuffix() string
// string to truncate tables
TruncateClause() string
// bind variable string to use when forming SQL statements
// in many dbs it is "?", but Postgres appears to use $1
//
// i is a zero based index of the bind variable in this statement
//
BindVar(i int) string
// Handles quoting of a field name to ensure that it doesn't raise any
// SQL parsing exceptions by using a reserved word as a field name.
QuoteField(field string) string
// Handles building up of a schema.database string that is compatible with
// the given dialect
//
// schema - The schema that <table> lives in
// table - The table name
QuotedTableForQuery(schema string, table string) string
// Existence clause for table creation / deletion
IfSchemaNotExists(command, schema string) string
IfTableExists(command, schema, table string) string
IfTableNotExists(command, schema, table string) string
}
// IntegerAutoIncrInserter is implemented by dialects that can perform
// inserts with automatically incremented integer primary keys. If
// the dialect can handle automatic assignment of more than just
// integers, see TargetedAutoIncrInserter.
type IntegerAutoIncrInserter interface {
InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error)
}
// TargetedAutoIncrInserter is implemented by dialects that can
// perform automatic assignment of any primary key type (i.e. strings
// for uuids, integers for serials, etc).
type TargetedAutoIncrInserter interface {
// InsertAutoIncrToTarget runs an insert operation and assigns the
// automatically generated primary key directly to the passed in
// target. The target should be a pointer to the primary key
// field of the value being inserted.
InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error
}
func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
res, err := exec.Exec(insertSql, params...)
if err != nil {
return 0, err
}
return res.LastInsertId()
}
///////////////////////////////////////////////////////
// sqlite3 //
/////////////
type SqliteDialect struct {
suffix string
}
func (d SqliteDialect) QuerySuffix() string { return ";" }
func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
switch val.Kind() {
case reflect.Ptr:
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
case reflect.Bool:
return "integer"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return "integer"
case reflect.Float64, reflect.Float32:
return "real"
case reflect.Slice:
if val.Elem().Kind() == reflect.Uint8 {
return "blob"
}
}
switch val.Name() {
case "NullInt64":
return "integer"
case "NullFloat64":
return "real"
case "NullBool":
return "integer"
case "Time":
return "datetime"
}
if maxsize < 1 {
maxsize = 255
}
return fmt.Sprintf("varchar(%d)", maxsize)
}
// Returns autoincrement
func (d SqliteDialect) AutoIncrStr() string {
return "autoincrement"
}
func (d SqliteDialect) AutoIncrBindValue() string {
return "null"
}
func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
return ""
}
// Returns suffix
func (d SqliteDialect) CreateTableSuffix() string {
return d.suffix
}
// With sqlite, there technically isn't a TRUNCATE statement,
// but a DELETE FROM uses a truncate optimization:
// http://www.sqlite.org/lang_delete.html
func (d SqliteDialect) TruncateClause() string {
return "delete from"
}
// Returns "?"
func (d SqliteDialect) BindVar(i int) string {
return "?"
}
func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
return standardInsertAutoIncr(exec, insertSql, params...)
}
func (d SqliteDialect) QuoteField(f string) string {
return `"` + f + `"`
}
// sqlite does not have schemas like PostgreSQL does, so just escape it like normal
func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string {
return d.QuoteField(table)
}
func (d SqliteDialect) IfSchemaNotExists(command, schema string) string {
return fmt.Sprintf("%s if not exists", command)
}
func (d SqliteDialect) IfTableExists(command, schema, table string) string {
return fmt.Sprintf("%s if exists", command)
}
func (d SqliteDialect) IfTableNotExists(command, schema, table string) string {
return fmt.Sprintf("%s if not exists", command)
}
///////////////////////////////////////////////////////
// PostgreSQL //
////////////////
type PostgresDialect struct {
suffix string
}
func (d PostgresDialect) QuerySuffix() string { return ";" }
func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
switch val.Kind() {
case reflect.Ptr:
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
case reflect.Bool:
return "boolean"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
if isAutoIncr {
return "serial"
}
return "integer"
case reflect.Int64, reflect.Uint64:
if isAutoIncr {
return "bigserial"
}
return "bigint"
case reflect.Float64:
return "double precision"
case reflect.Float32:
return "real"
case reflect.Slice:
if val.Elem().Kind() == reflect.Uint8 {
return "bytea"
}
}
switch val.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "double precision"
case "NullBool":
return "boolean"
case "Time":
return "timestamp with time zone"
}
if maxsize > 0 {
return fmt.Sprintf("varchar(%d)", maxsize)
} else {
return "text"
}
}
// Returns empty string
func (d PostgresDialect) AutoIncrStr() string {
return ""
}
func (d PostgresDialect) AutoIncrBindValue() string {
return "default"
}
func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
return " returning " + col.ColumnName
}
// Returns suffix
func (d PostgresDialect) CreateTableSuffix() string {
return d.suffix
}
func (d PostgresDialect) TruncateClause() string {
return "truncate"
}
// Returns "$(i+1)"
func (d PostgresDialect) BindVar(i int) string {
return fmt.Sprintf("$%d", i+1)
}
func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error {
rows, err := exec.query(insertSql, params...)
if err != nil {
return err
}
defer rows.Close()
if rows.Next() {
err := rows.Scan(target)
return err
}
return errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error())
}
func (d PostgresDialect) QuoteField(f string) string {
return `"` + strings.ToLower(f) + `"`
}
func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string {
if strings.TrimSpace(schema) == "" {
return d.QuoteField(table)
}
return schema + "." + d.QuoteField(table)
}
func (d PostgresDialect) IfSchemaNotExists(command, schema string) string {
return fmt.Sprintf("%s if not exists", command)
}
func (d PostgresDialect) IfTableExists(command, schema, table string) string {
return fmt.Sprintf("%s if exists", command)
}
func (d PostgresDialect) IfTableNotExists(command, schema, table string) string {
return fmt.Sprintf("%s if not exists", command)
}
///////////////////////////////////////////////////////
// MySQL //
///////////
// Implementation of Dialect for MySQL databases.
type MySQLDialect struct {
// Engine is the storage engine to use "InnoDB" vs "MyISAM" for example
Engine string
// Encoding is the character encoding to use for created tables
Encoding string
}
func (d MySQLDialect) QuerySuffix() string { return ";" }
func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
switch val.Kind() {
case reflect.Ptr:
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
case reflect.Bool:
return "boolean"
case reflect.Int8:
return "tinyint"
case reflect.Uint8:
return "tinyint unsigned"
case reflect.Int16:
return "smallint"
case reflect.Uint16:
return "smallint unsigned"
case reflect.Int, reflect.Int32:
return "int"
case reflect.Uint, reflect.Uint32:
return "int unsigned"
case reflect.Int64:
return "bigint"
case reflect.Uint64:
return "bigint unsigned"
case reflect.Float64, reflect.Float32:
return "double"
case reflect.Slice:
if val.Elem().Kind() == reflect.Uint8 {
return "mediumblob"
}
}
switch val.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "double"
case "NullBool":
return "tinyint"
case "Time":
return "datetime"
}
if maxsize < 1 {
maxsize = 255
}
return fmt.Sprintf("varchar(%d)", maxsize)
}
// Returns auto_increment
func (d MySQLDialect) AutoIncrStr() string {
return "auto_increment"
}
func (d MySQLDialect) AutoIncrBindValue() string {
return "null"
}
func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
return ""
}
// Returns engine=%s charset=%s based on values stored on struct
func (d MySQLDialect) CreateTableSuffix() string {
if d.Engine == "" || d.Encoding == "" {
msg := "gorp - undefined"
if d.Engine == "" {
msg += " MySQLDialect.Engine"
}
if d.Engine == "" && d.Encoding == "" {
msg += ","
}
if d.Encoding == "" {
msg += " MySQLDialect.Encoding"
}
msg += ". Check that your MySQLDialect was correctly initialized when declared."
panic(msg)
}
return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding)
}
func (d MySQLDialect) TruncateClause() string {
return "truncate"
}
// Returns "?"
func (d MySQLDialect) BindVar(i int) string {
return "?"
}
func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
return standardInsertAutoIncr(exec, insertSql, params...)
}
func (d MySQLDialect) QuoteField(f string) string {
return "`" + f + "`"
}
func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string {
if strings.TrimSpace(schema) == "" {
return d.QuoteField(table)
}
return schema + "." + d.QuoteField(table)
}
func (d MySQLDialect) IfSchemaNotExists(command, schema string) string {
return fmt.Sprintf("%s if not exists", command)
}
func (d MySQLDialect) IfTableExists(command, schema, table string) string {
return fmt.Sprintf("%s if exists", command)
}
func (d MySQLDialect) IfTableNotExists(command, schema, table string) string {
return fmt.Sprintf("%s if not exists", command)
}
///////////////////////////////////////////////////////
// Sql Server //
////////////////
// Implementation of Dialect for Microsoft SQL Server databases.
// Tested on SQL Server 2008 with driver: github.com/denisenkom/go-mssqldb
type SqlServerDialect struct {
suffix string
}
func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
switch val.Kind() {
case reflect.Ptr:
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
case reflect.Bool:
return "bit"
case reflect.Int8:
return "tinyint"
case reflect.Uint8:
return "smallint"
case reflect.Int16:
return "smallint"
case reflect.Uint16:
return "int"
case reflect.Int, reflect.Int32:
return "int"
case reflect.Uint, reflect.Uint32:
return "bigint"
case reflect.Int64:
return "bigint"
case reflect.Uint64:
return "bigint"
case reflect.Float32:
return "real"
case reflect.Float64:
return "float(53)"
case reflect.Slice:
if val.Elem().Kind() == reflect.Uint8 {
return "varbinary"
}
}
switch val.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "float(53)"
case "NullBool":
return "tinyint"
case "Time":
return "datetime"
}
if maxsize < 1 {
maxsize = 255
}
return fmt.Sprintf("varchar(%d)", maxsize)
}
// Returns identity(0,1)
func (d SqlServerDialect) AutoIncrStr() string {
return "identity(0,1)"
}
// Empty string removes autoincrement columns from the INSERT statements.
func (d SqlServerDialect) AutoIncrBindValue() string {
return ""
}
func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
return ""
}
// Returns suffix
func (d SqlServerDialect) CreateTableSuffix() string {
return d.suffix
}
func (d SqlServerDialect) TruncateClause() string {
return "delete from"
}
// Returns "?"
func (d SqlServerDialect) BindVar(i int) string {
return "?"
}
func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
return standardInsertAutoIncr(exec, insertSql, params...)
}
func (d SqlServerDialect) QuoteField(f string) string {
return `"` + f + `"`
}
func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string {
if strings.TrimSpace(schema) == "" {
return table
}
return schema + "." + table
}
func (d SqlServerDialect) QuerySuffix() string { return ";" }
func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string {
s := fmt.Sprintf("if not exists (select name from sys.schemas where name = '%s') %s", schema, command)
return s
}
func (d SqlServerDialect) IfTableExists(command, schema, table string) string {
var schema_clause string
if strings.TrimSpace(schema) != "" {
schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema)
}
s := fmt.Sprintf("if exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command)
return s
}
func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string {
var schema_clause string
if strings.TrimSpace(schema) != "" {
schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema)
}
s := fmt.Sprintf("if not exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command)
return s
}
///////////////////////////////////////////////////////
// Oracle //
///////////
// Implementation of Dialect for Oracle databases.
type OracleDialect struct{}
func (d OracleDialect) QuerySuffix() string { return "" }
func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
switch val.Kind() {
case reflect.Ptr:
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
case reflect.Bool:
return "boolean"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
if isAutoIncr {
return "serial"
}
return "integer"
case reflect.Int64, reflect.Uint64:
if isAutoIncr {
return "bigserial"
}
return "bigint"
case reflect.Float64:
return "double precision"
case reflect.Float32:
return "real"
case reflect.Slice:
if val.Elem().Kind() == reflect.Uint8 {
return "bytea"
}
}
switch val.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "double precision"
case "NullBool":
return "boolean"
case "NullTime", "Time":
return "timestamp with time zone"
}
if maxsize > 0 {
return fmt.Sprintf("varchar(%d)", maxsize)
} else {
return "text"
}
}
// Returns empty string
func (d OracleDialect) AutoIncrStr() string {
return ""
}
func (d OracleDialect) AutoIncrBindValue() string {
return "default"
}
func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
return " returning " + col.ColumnName
}
// Returns suffix
func (d OracleDialect) CreateTableSuffix() string {
return ""
}
func (d OracleDialect) TruncateClause() string {
return "truncate"
}
// Returns "$(i+1)"
func (d OracleDialect) BindVar(i int) string {
return fmt.Sprintf(":%d", i+1)
}
func (d OracleDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
rows, err := exec.query(insertSql, params...)
if err != nil {
return 0, err
}
defer rows.Close()
if rows.Next() {
var id int64
err := rows.Scan(&id)
return id, err
}
return 0, errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error())
}
func (d OracleDialect) QuoteField(f string) string {
return `"` + strings.ToUpper(f) + `"`
}
func (d OracleDialect) QuotedTableForQuery(schema string, table string) string {
if strings.TrimSpace(schema) == "" {
return d.QuoteField(table)
}
return schema + "." + d.QuoteField(table)
}
func (d OracleDialect) IfSchemaNotExists(command, schema string) string {
return fmt.Sprintf("%s if not exists", command)
}
func (d OracleDialect) IfTableExists(command, schema, table string) string {
return fmt.Sprintf("%s if exists", command)
}
func (d OracleDialect) IfTableNotExists(command, schema, table string) string {
return fmt.Sprintf("%s if not exists", command)
}

26
Godeps/_workspace/src/gopkg.in/gorp.v1/errors.go generated vendored Normal file
View file

@ -0,0 +1,26 @@
package gorp
import (
"fmt"
)
// A non-fatal error, when a select query returns columns that do not exist
// as fields in the struct it is being mapped to
type NoFieldInTypeError struct {
TypeName string
MissingColNames []string
}
func (err *NoFieldInTypeError) Error() string {
return fmt.Sprintf("gorp: No fields %+v in type %s", err.MissingColNames, err.TypeName)
}
// returns true if the error is non-fatal (i.e., we shouldn't immediately return)
func NonFatalError(err error) bool {
switch err.(type) {
case *NoFieldInTypeError:
return true
default:
return false
}
}

2085
Godeps/_workspace/src/gopkg.in/gorp.v1/gorp.go generated vendored Normal file

File diff suppressed because it is too large

2083
Godeps/_workspace/src/gopkg.in/gorp.v1/gorp_test.go generated vendored Normal file

File diff suppressed because it is too large

22
Godeps/_workspace/src/gopkg.in/gorp.v1/test_all.sh generated vendored Normal file
View file

@ -0,0 +1,22 @@
#!/bin/sh
# on macs, you may need to:
# export GOBUILDFLAG=-ldflags -linkmode=external
set -e
export GORP_TEST_DSN=gorptest/gorptest/gorptest
export GORP_TEST_DIALECT=mysql
go test $GOBUILDFLAG .
export GORP_TEST_DSN=gorptest:gorptest@/gorptest
export GORP_TEST_DIALECT=gomysql
go test $GOBUILDFLAG .
export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable"
export GORP_TEST_DIALECT=postgres
go test $GOBUILDFLAG .
export GORP_TEST_DSN=/tmp/gorptest.bin
export GORP_TEST_DIALECT=sqlite
go test $GOBUILDFLAG .

15
build
View file

@ -3,6 +3,21 @@
export GOPATH=${PWD}/Godeps/_workspace
export GOBIN=${PWD}/bin
if command -v go-bindata &>/dev/null; then
DEX_MIGRATE_FROM_DISK=${DEX_MIGRATE_FROM_DISK:=false}
echo "Turning migrations into ./db/migrations/assets.go"
if [ "$DEX_MIGRATE_FROM_DISK" = true ]; then
echo "Compiling migrations.go: will read migrations from disk."
else
echo "Compiling migrations into migrations.go"
fi
go-bindata -debug=$DEX_MIGRATE_FROM_DISK -modtime=1 -pkg migrations -o ./db/migrations/assets.go ./db/migrations
gofmt -w ./db/migrations/assets.go
else
echo "Could not find go-bindata in path, will not generate migrations"
fi
rm -rf $GOPATH/src/github.com/coreos/dex
mkdir -p $GOPATH/src/github.com/coreos/
ln -s ${PWD} $GOPATH/src/github.com/coreos/dex

View file

@ -15,6 +15,7 @@ import (
"github.com/coreos/dex/db" "github.com/coreos/dex/db"
pflag "github.com/coreos/dex/pkg/flag" pflag "github.com/coreos/dex/pkg/flag"
"github.com/coreos/dex/pkg/log" "github.com/coreos/dex/pkg/log"
ptime "github.com/coreos/dex/pkg/time"
"github.com/coreos/dex/server" "github.com/coreos/dex/server"
"github.com/coreos/dex/user" "github.com/coreos/dex/user"
) )
@ -29,14 +30,17 @@ func main() {
fs := flag.NewFlagSet("dex-overlord", flag.ExitOnError)
secret := fs.String("key-secret", "", "symmetric key used to encrypt/decrypt signing key data in DB")
dbURL := fs.String("db-url", "", "DSN-formatted database connection string")
dbMigrate := fs.Bool("db-migrate", true, "perform database migrations when starting up overlord. This includes the initial creation of DB objects.")
keyPeriod := fs.Duration("key-period", 24*time.Hour, "length of time for which a given key will be valid")
gcInterval := fs.Duration("gc-interval", time.Hour, "length of time between garbage collection runs")
adminListen := fs.String("admin-listen", "http://0.0.0.0:5557", "scheme, host and port for listening for administrative operation requests")
localConnectorID := fs.String("local-connector", "local", "ID of the local connector")
logDebug := fs.Bool("log-debug", false, "log debug-level information")
logTimestamps := fs.Bool("log-timestamps", false, "prefix log lines with timestamps")
if err := fs.Parse(os.Args[1:]); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
@ -74,6 +78,19 @@ func main() {
log.Fatalf(err.Error())
}
if *dbMigrate {
var sleep time.Duration
for {
if migrations, err := db.MigrateToLatest(dbc); err == nil {
log.Infof("Performed %d db migrations", migrations)
break
}
sleep = ptime.ExpBackoff(sleep, time.Minute)
log.Errorf("Unable to migrate database, retrying in %v: %v", sleep, err)
time.Sleep(sleep)
}
}
userRepo := db.NewUserRepo(dbc)
pwiRepo := db.NewPasswordInfoRepo(dbc)
userManager := user.NewManager(userRepo,

View file

@ -66,7 +66,7 @@ type clientIdentityModel struct {
ID string `db:"id"` ID string `db:"id"`
Secret []byte `db:"secret"` Secret []byte `db:"secret"`
Metadata string `db:"metadata"` Metadata string `db:"metadata"`
DexAdmin bool `db:"dexAdmin"` DexAdmin bool `db:"dex_admin"`
} }
func newClientMetadataJSON(cm *oidc.ClientMetadata) *clientMetadataJSON { func newClientMetadataJSON(cm *oidc.ClientMetadata) *clientMetadataJSON {

View file

@ -5,13 +5,11 @@ import (
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
"time"
"github.com/coopernurse/gorp" "github.com/coopernurse/gorp"
_ "github.com/lib/pq" _ "github.com/lib/pq"
"github.com/coreos/dex/pkg/log" "github.com/coreos/dex/pkg/log"
ptime "github.com/coreos/dex/pkg/time"
"github.com/coreos/dex/repo" "github.com/coreos/dex/repo"
) )
@ -73,16 +71,6 @@ func NewConnection(cfg Config) (*gorp.DbMap, error) {
}
}
var sleep time.Duration
for {
if err = dbm.CreateTablesIfNotExists(); err == nil {
break
}
sleep = ptime.ExpBackoff(sleep, time.Minute)
log.Errorf("Unable to initialize database, retrying in %v: %v", sleep, err)
time.Sleep(sleep)
}
return &dbm, nil
}

52
db/migrate.go Normal file
View file

@ -0,0 +1,52 @@
package db
import (
"fmt"
"github.com/coopernurse/gorp"
"github.com/lib/pq"
migrate "github.com/rubenv/sql-migrate"
"github.com/coreos/dex/db/migrations"
)
const (
migrationDialect = "postgres"
migrationTable = "dex_migrations"
migrationDir = "db/migrations"
)
func init() {
migrate.SetTable(migrationTable)
}
func MigrateToLatest(dbMap *gorp.DbMap) (int, error) {
source := getSource()
return migrate.Exec(dbMap.Db, migrationDialect, source, migrate.Up)
}
func MigrateMaxMigrations(dbMap *gorp.DbMap, max int) (int, error) {
source := getSource()
return migrate.ExecMax(dbMap.Db, migrationDialect, source, migrate.Up, max)
}
func GetPlannedMigrations(dbMap *gorp.DbMap) ([]*migrate.PlannedMigration, error) {
migrations, _, err := migrate.PlanMigration(dbMap.Db, migrationDialect, getSource(), migrate.Up, 0)
return migrations, err
}
func DropMigrationsTable(dbMap *gorp.DbMap) error {
qt := pq.QuoteIdentifier(migrationTable)
_, err := dbMap.Exec(fmt.Sprintf("drop table if exists %s ;", qt))
return err
}
func getSource() migrate.MigrationSource {
return &migrate.AssetMigrationSource{
Dir: migrationDir,
Asset: migrations.Asset,
AssetDir: migrations.AssetDir,
}
}

42
db/migrate_test.go Normal file
View file

@ -0,0 +1,42 @@
package db
import (
"fmt"
"os"
"testing"
"github.com/coopernurse/gorp"
)
func initDB(dsn string) *gorp.DbMap {
c, err := NewConnection(Config{DSN: dsn})
if err != nil {
panic(fmt.Sprintf("error making db connection: %q", err))
}
if err = c.DropTablesIfExists(); err != nil {
panic(fmt.Sprintf("Unable to drop database tables: %v", err))
}
return c
}
// TestGetPlannedMigrations is a sanity check, ensuring that at least one
// migration can be found.
func TestGetPlannedMigrations(t *testing.T) {
dsn := os.Getenv("DEX_TEST_DSN")
if dsn == "" {
t.Logf("Test will not run without DEX_TEST_DSN environment variable.")
return
}
dbMap := initDB(dsn)
ms, err := GetPlannedMigrations(dbMap)
if err != nil {
pwd, err := os.Getwd()
t.Logf("pwd: %v", pwd)
t.Fatalf("unexpected err: %q", err)
}
if len(ms) == 0 {
t.Fatalf("expected non-empty migrations")
}
}

View file

@ -0,0 +1,47 @@
-- +migrate Up
CREATE TABLE IF NOT EXISTS "authd_user" (
"id" text not null primary key,
"email" text,
"email_verified" boolean,
"display_name" text,
"admin" boolean) ;
CREATE TABLE IF NOT EXISTS "client_identity" (
"id" text not null primary key,
"secret" bytea,
"metadata" text);
CREATE TABLE IF NOT EXISTS "connector_config" (
"id" text not null primary key,
"type" text, "config" text) ;
CREATE TABLE IF NOT EXISTS "key" (
"value" bytea not null primary key) ;
CREATE TABLE IF NOT EXISTS "password_info" (
"user_id" text not null primary key,
"password" text,
"password_expires" bigint) ;
CREATE TABLE IF NOT EXISTS "session" (
"id" text not null primary key,
"state" text,
"created_at" bigint,
"expires_at" bigint,
"client_id" text,
"client_state" text,
"redirect_url" text, "identity" text,
"connector_id" text,
"user_id" text, "register" boolean) ;
CREATE TABLE IF NOT EXISTS "session_key" (
"key" text not null primary key,
"session_id" text,
"expires_at" bigint,
"stale" boolean) ;
CREATE TABLE IF NOT EXISTS "remote_identity_mapping" (
"connector_id" text not null,
"user_id" text,
"remote_id" text not null,
primary key ("connector_id", "remote_id")) ;

View file

@ -0,0 +1,2 @@
-- +migrate Up
ALTER TABLE client_identity ADD COLUMN "dex_admin" boolean;

View file

@ -0,0 +1,2 @@
-- +migrate Up
ALTER TABLE authd_user ADD COLUMN "created_at" bigint;

View file

@ -0,0 +1,2 @@
-- +migrate Up
ALTER TABLE session ADD COLUMN "nonce" text;

View file

@ -0,0 +1,21 @@
-- +migrate Up
CREATE TABLE refresh_token (
id bigint NOT NULL,
payload_hash text,
user_id text,
client_id text
);
CREATE SEQUENCE refresh_token_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE refresh_token_id_seq OWNED BY refresh_token.id;
ALTER TABLE ONLY refresh_token ALTER COLUMN id SET DEFAULT nextval('refresh_token_id_seq'::regclass);
ALTER TABLE ONLY refresh_token
ADD CONSTRAINT refresh_token_pkey PRIMARY KEY (id);

View file

@ -0,0 +1,3 @@
-- +migrate Up
ALTER TABLE ONLY authd_user
ADD CONSTRAINT authd_user_email_key UNIQUE (email);

377
db/migrations/assets.go Normal file

File diff suppressed because one or more lines are too long

View file

@ -14,7 +14,10 @@ import (
)
const (
userTableName = "dex_user" // This table is named authd_user for historical reasons; namely, that the
// original name of the project was authd, and there are existing tables out
// there that we don't want to have to rename in production.
userTableName = "authd_user"
remoteIdentityMappingTableName = "remote_identity_mapping"
)

View file

@ -40,10 +40,12 @@ func connect(t *testing.T) *gorp.DbMap {
t.Fatalf("Unable to drop database tables: %v", err) t.Fatalf("Unable to drop database tables: %v", err)
} }
if err = c.CreateTablesIfNotExists(); err != nil { if err = db.DropMigrationsTable(c); err != nil {
t.Fatalf("Unable to create database tables: %v", err) panic(fmt.Sprintf("Unable to drop migration table: %v", err))
} }
db.MigrateToLatest(c)
return c return c
} }

View file

@ -18,8 +18,10 @@ func initDB(dsn string) *gorp.DbMap {
panic(fmt.Sprintf("Unable to drop database tables: %v", err)) panic(fmt.Sprintf("Unable to drop database tables: %v", err))
} }
if err = c.CreateTablesIfNotExists(); err != nil { if err = db.DropMigrationsTable(c); err != nil {
panic(fmt.Sprintf("Unable to create database tables: %v", err)) panic(fmt.Sprintf("Unable to drop migration table: %v", err))
} }
db.MigrateToLatest(c)
return c return c
} }