|
|
|
|
|
|
|
|
// Code generated by go-bindata.
|
|
|
|
|
// sources:
|
|
|
|
|
// 0001_initial_migration.sql
|
|
|
|
|
// 0002_dex_admin.sql
|
|
|
|
|
// 0003_user_created_at.sql
|
|
|
|
|
// 0004_session_nonce.sql
|
|
|
|
|
// 0005_refresh_token_create.sql
|
|
|
|
|
// 0006_user_email_unique.sql
|
|
|
|
|
// 0007_session_scope.sql
|
|
|
|
|
// 0008_users_active_or_inactive.sql
|
|
|
|
|
// 0009_key_not_primary_key.sql
|
|
|
|
|
// 0010_client_metadata_field_changed.sql
|
|
|
|
|
// DO NOT EDIT!
|
|
|
|
|
|
|
|
|
|
package migrations
|
|
|
|
|
|
|
|
|
|
import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"

	migrate "github.com/rubenv/sql-migrate"
)
|
|
|
|
|
// THIS FILE WAS GENERATED BY gen.go DO NOT EDIT
|
|
|
|
|
|
|
|
|
|
func bindataRead(data []byte, name string) ([]byte, error) {
|
|
|
|
|
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, fmt.Errorf("Read %q: %v", name, err)
|
|
|
|
|
}
|
|
|
|
|
import "github.com/rubenv/sql-migrate"
|
|
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
|
_, err = io.Copy(&buf, gz)
|
|
|
|
|
clErr := gz.Close()
|
|
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, fmt.Errorf("Read %q: %v", name, err)
|
|
|
|
|
}
|
|
|
|
|
if clErr != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return buf.Bytes(), nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// asset pairs an embedded file's uncompressed contents with its
// synthesized file metadata.
type asset struct {
	bytes []byte      // uncompressed file contents
	info  os.FileInfo // synthetic metadata (a bindataFileInfo)
}
|
|
|
|
|
|
|
|
|
|
// bindataFileInfo implements os.FileInfo for embedded assets, using
// values recorded when the file was generated.
type bindataFileInfo struct {
	name    string      // slash-separated asset path
	size    int64       // uncompressed size in bytes
	mode    os.FileMode // file mode recorded at generation time
	modTime time.Time   // modification time recorded at generation time
}
|
|
|
|
|
|
|
|
|
|
// Name returns the recorded asset path.
func (fi bindataFileInfo) Name() string {
	return fi.name
}
|
|
|
|
|
// Size returns the recorded uncompressed size in bytes.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}
|
|
|
|
|
// Mode returns the recorded file mode.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}
|
|
|
|
|
// ModTime returns the recorded modification time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}
|
|
|
|
|
// IsDir always reports false: every embedded asset is a file.
func (fi bindataFileInfo) IsDir() bool {
	return false
}
|
|
|
|
|
// Sys returns nil: there is no underlying OS data source.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0001_initial_migrationSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x9c\x93\xcf\x8a\xdb\x30\x10\xc6\xef\x79\x0a\x91\xd3\x86\x66\x9f\xa0\xa7\x6d\x71\x61\xa1\xb4\xd0\x75\xa1\x37\x31\xb5\x26\xee\x50\xfd\x43\x1a\x6f\xd7\x6f\x5f\xd9\x4e\x14\xc7\xce\xc6\x26\x81\x84\x58\xd6\xfc\xe6\x9b\x6f\x66\x1e\x1f\xc5\x07\x43\x75\x00\x46\xf1\xd3\x6f\x3e\xff\x28\x9e\xca\x42\x94\x4f\x9f\xbe\x16\xe2\xf9\x8b\xf8\xf6\xbd\x14\xc5\xaf\xe7\x97\xf2\x45\x6c\xa1\xe1\x3f\x4a\x36\x11\xc3\x56\x3c\x6c\xc4\xf0\xd9\x92\xda\x0a\xc6\x37\x16\xd6\xa5\x6f\xa3\xb5\xf0\x81\x0c\x84\x56\xfc\xc5\x76\x9f\xaf\xa1\x01\xd2\xc3\xcd\xc9\xa1\x7c\xc5\x40\x07\xc2\xc4\xf9\xed\x9c\x46\xb0\xe7\x0b\x8a\xa2\xd7\xd0\x4a\x0b\x06\xa7\xc1\xa0\x0c\xd9\x1c\xb3\x13\x1f\x37\x37\xd5\x57\x9a\xd0\xb2\x24\x95\x7e\x89\xdb\x3b\x4a\x88\x58\x05\xe4\x94\xb1\x65\x84\xf3\xb1\x41\x06\x05\x0c\x03\x63\xb7\x24\xc3\x59\x8b\x15\xbb\x20\xd3\xbf\x03\xd5\x77\xe8\xe0\xd6\x9f\xcc\xe8\x79\x3d\xa5\xcf\xbd\xe4\x41\xc2\x8c\xf3\xbd\x82\x6e\xf0\x58\xcf\xd5\x9c\x8b\x40\x0f\x31\xfe\x73\x41\x49\xb2\x07\x37\x46\x77\x53\x22\x57\xd6\x73\x82\x4c\x1b\x9c\xe1\xf8\xe6\x29\x60\x4c\x4a\xa9\x26\xbb\x5c\x66\xc4\x18\xc9\xd9\x7b\x5a\xcc\x69\x0f\xa6\x3a\x52\xd7\xd3\xa9\x92\xc0\x27\x05\xa3\x09\x1e\xa4\x5d\x7d\x97\x27\x6e\x06\x1c\x5e\x5c\x4d\x16\x50\x25\x60\xc5\xb2\x09\x3a\x37\xf9\x3c\xb4\x13\x50\x1e\xa6\x79\x92\x8b\x0e\xec\x3b\x70\x4d\x91\xbb\xdd\x5d\xbb\x31\x47\x1b\xe5\x64\x6a\xfa\xc7\x55\xeb\x32\x84\xcf\xa5\xdd\x32\x2d\x99\xa2\x71\xbd\xc6\x80\xc6\x31\xe6\xad\x96\x06\xbc\x27\x7b\xb1\x55\x73\x93\xb2\xf0\xf7\xdc\x1a\x75\xe3\x88\x7f\x2f\x70\x54\xb9\x78\xb8\x4c\xb5\x1f\x87\xef\xba\x4a\xfe\x07\x00\x00\xff\xff\xae\xbe\x65\x61\x6c\x05\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0001_initial_migrationSqlBytes returns the uncompressed
// contents of the embedded 0001_initial_migration.sql asset.
func dbMigrations0001_initial_migrationSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0001_initial_migrationSql,
		"db/migrations/0001_initial_migration.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0001_initial_migrationSql builds the asset (contents plus
// synthetic file info) for 0001_initial_migration.sql.
func dbMigrations0001_initial_migrationSql() (*asset, error) {
	// NOTE: the local `bytes` shadows the bytes package inside this
	// generated function.
	bytes, err := dbMigrations0001_initial_migrationSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0001_initial_migration.sql", size: 1388, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0002_dex_adminSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\xce\xc9\x4c\xcd\x2b\x89\xcf\x4c\x01\x92\x99\x25\x95\x0a\x8e\x2e\x2e\x0a\xce\xfe\x3e\xa1\xbe\x7e\x0a\x4a\x29\xa9\x15\xf1\x89\x29\xb9\x99\x79\x4a\x0a\x49\xf9\xf9\x39\xa9\x89\x79\xd6\x5c\x5c\xa1\x01\x2e\x8e\x21\xae\x0a\x4a\x68\x1a\x95\x14\x82\x5d\x43\x50\xb4\xd8\x2a\xa4\x25\xe6\x14\xa7\x5a\x73\x01\x02\x00\x00\xff\xff\x89\xb3\xb1\x5d\x7e\x00\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0002_dex_adminSqlBytes returns the uncompressed contents of
// the embedded 0002_dex_admin.sql asset.
func dbMigrations0002_dex_adminSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0002_dex_adminSql,
		"db/migrations/0002_dex_admin.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0002_dex_adminSql builds the asset for 0002_dex_admin.sql.
func dbMigrations0002_dex_adminSql() (*asset, error) {
	bytes, err := dbMigrations0002_dex_adminSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0002_dex_admin.sql", size: 126, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0003_user_created_atSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\x52\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\x4a\x2e\x4a\x05\x2a\x4e\x89\x4f\x2c\x51\x52\x48\xca\x4c\xcf\xcc\x2b\xb1\xe6\xe2\x0a\x0d\x70\x71\x0c\x41\xd1\x12\xec\x1a\x82\xaa\xd6\x56\xc1\xc0\x9a\x0b\x10\x00\x00\xff\xff\x28\xce\xe1\xe1\x6f\x00\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0003_user_created_atSqlBytes returns the uncompressed
// contents of the embedded 0003_user_created_at.sql asset.
func dbMigrations0003_user_created_atSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0003_user_created_atSql,
		"db/migrations/0003_user_created_at.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0003_user_created_atSql builds the asset for
// 0003_user_created_at.sql.
func dbMigrations0003_user_created_atSql() (*asset, error) {
	bytes, err := dbMigrations0003_user_created_atSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0003_user_created_at.sql", size: 111, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0004_session_nonceSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4e\x2d\x2e\xce\xcc\xcf\x53\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\xca\xcb\xcf\x4b\x4e\x55\x52\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x77\x11\x16\x8b\x3c\x00\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0004_session_nonceSqlBytes returns the uncompressed
// contents of the embedded 0004_session_nonce.sql asset.
func dbMigrations0004_session_nonceSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0004_session_nonceSql,
		"db/migrations/0004_session_nonce.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0004_session_nonceSql builds the asset for
// 0004_session_nonce.sql.
func dbMigrations0004_session_nonceSql() (*asset, error) {
	bytes, err := dbMigrations0004_session_nonceSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0004_session_nonce.sql", size: 60, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0005_refresh_token_createSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x84\x90\x51\x4f\xc2\x30\x10\xc7\xdf\xf7\x29\xee\x0d\x88\x62\xc2\x2b\x3e\x95\xed\x0c\x8b\x5d\xa7\x5d\x2b\xf2\xb4\x14\x56\xa1\x61\x8e\xb9\x56\xe3\xbe\xbd\x65\x13\x0c\xc4\x68\xdf\xfa\xbb\xfb\xdf\x2f\x77\xe3\x31\x5c\xbd\x9a\x4d\xa3\x9c\x06\x59\x07\x21\x47\x22\x10\x04\x99\x51\x84\x46\xbf\x34\xda\x6e\x73\xb7\xdf\xe9\x0a\x86\x01\xf8\x67\x0a\x58\x99\x8d\xa9\x1c\xb0\x54\x00\x93\x94\x5e\x77\xbc\x56\x6d\xb9\x57\x45\xbe\x55\x76\x0b\xab\xd6\x69\xd5\xf3\x77\xab\x9b\xdc\x87\x9c\xfe\x74\x3d\x59\x97\x46\x57\xee\xc8\x82\xd1\x6d\x70\xb4\x66\xf8\x28\x91\x85\x17\x62\xdf\x99\x5b\xfd\xd6\x65\x33\x41\xb8\x80\x45\x2c\xe6\x30\xe9\x40\xcc\x7c\x36\x41\x26\x60\xb6\xfc\x46\x2c\x85\x24\x66\x4f\x84\x4a\x3c\xfd\xc9\xf3\xcf\x3f\x24\xe1\x1c\x61\xe2\xb5\x84\x0a\xe4\x7f\x5b\x21\x5d\x30\x8c\x0e\xc3\xcf\xaa\x37\xa6\x38\xe5\xfb\x5b\xa5\x8c\x5e\xf4\x40\x5f\x0e\x53\x2a\x13\x76\xb8\x5b\x86\x02\x22\xbc\x23\x92\x0a\xa8\xfc\xea\x1f\xaa\x1c\x0e\x7e\x93\x0e\xa6\xd3\x46\x6f\xd6\xa5\xb2\x76\xf4\xaf\xa6\xdb\x89\x44\x91\x17\xb1\x4c\x70\x12\xfb\x5b\x9c\x0f\xad\x77\xba\x85\x07\x1e\x27\x84\x2f\xe1\x1e\x97\x30\x34\x85\x9f\xfb\x15\x00\x00\xff\xff\xc4\xcc\xa8\xfd\xfa\x01\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0005_refresh_token_createSqlBytes returns the uncompressed
// contents of the embedded 0005_refresh_token_create.sql asset.
func dbMigrations0005_refresh_token_createSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0005_refresh_token_createSql,
		"db/migrations/0005_refresh_token_create.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0005_refresh_token_createSql builds the asset for
// 0005_refresh_token_create.sql.
func dbMigrations0005_refresh_token_createSql() (*asset, error) {
	bytes, err := dbMigrations0005_refresh_token_createSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0005_refresh_token_create.sql", size: 506, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0006_user_email_uniqueSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\xf0\xf7\xf3\x89\x54\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\xe2\x52\x00\x02\x47\x17\x17\x05\x67\x7f\xbf\xe0\x90\x20\x47\x4f\xbf\x10\x24\xd9\xf8\xd4\xdc\xc4\xcc\x9c\xf8\xec\xd4\x4a\x85\x50\x3f\xcf\xc0\x50\x57\x05\x0d\xb0\x88\xa6\x35\x17\x20\x00\x00\xff\xff\x18\x48\x0d\x30\x63\x00\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0006_user_email_uniqueSqlBytes returns the uncompressed
// contents of the embedded 0006_user_email_unique.sql asset.
func dbMigrations0006_user_email_uniqueSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0006_user_email_uniqueSql,
		"db/migrations/0006_user_email_unique.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0006_user_email_uniqueSql builds the asset for
// 0006_user_email_unique.sql.
func dbMigrations0006_user_email_uniqueSql() (*asset, error) {
	bytes, err := dbMigrations0006_user_email_uniqueSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0006_user_email_unique.sql", size: 99, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0007_session_scopeSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4e\x2d\x2e\xce\xcc\xcf\x53\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\x2a\x4e\xce\x2f\x48\x55\x52\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x79\x7f\x20\x70\x3c\x00\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0007_session_scopeSqlBytes returns the uncompressed
// contents of the embedded 0007_session_scope.sql asset.
func dbMigrations0007_session_scopeSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0007_session_scopeSql,
		"db/migrations/0007_session_scope.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0007_session_scopeSql builds the asset for
// 0007_session_scope.sql.
func dbMigrations0007_session_scopeSql() (*asset, error) {
	bytes, err := dbMigrations0007_session_scopeSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0007_session_scope.sql", size: 60, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0008_users_active_or_inactiveSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\x52\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x48\xc9\x2c\x4e\x4c\xca\x49\x4d\x51\x48\xca\xcf\xcf\x49\x4d\xcc\xb3\xe6\xe2\x0a\x0d\x70\x71\x0c\x41\x51\x1f\xec\x1a\xa2\xa0\x04\x53\xa9\xa4\x60\xab\xe0\xe6\xe8\x13\xec\x6a\xcd\x05\x08\x00\x00\xff\xff\x06\x75\x98\x47\x6e\x00\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0008_users_active_or_inactiveSqlBytes returns the
// uncompressed contents of the embedded 0008_users_active_or_inactive.sql asset.
func dbMigrations0008_users_active_or_inactiveSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0008_users_active_or_inactiveSql,
		"db/migrations/0008_users_active_or_inactive.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0008_users_active_or_inactiveSql builds the asset for
// 0008_users_active_or_inactive.sql.
func dbMigrations0008_users_active_or_inactiveSql() (*asset, error) {
	bytes, err := dbMigrations0008_users_active_or_inactiveSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0008_users_active_or_inactive.sql", size: 110, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0009_key_not_primary_keySql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\xc8\x4e\xad\x54\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x28\xc9\x2d\x88\x2f\x4b\xcc\x29\x4d\x55\x48\xaa\x2c\x49\x4d\xb4\xe6\x0a\x0d\x70\x71\x0c\x71\x55\xf0\x76\x8d\x54\x08\x76\x0d\x41\x92\xb7\x55\x00\xd3\xd6\x18\xa6\xb9\x04\xf9\x07\xc0\x8c\xc3\xa1\x24\xc8\xd5\xcf\xd1\xd7\x15\xa6\x48\x09\x6e\xa8\x92\x42\x49\xbe\x82\x12\x84\x69\xcd\x05\x08\x00\x00\xff\xff\x58\x6f\x17\x19\xb6\x00\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0009_key_not_primary_keySqlBytes returns the uncompressed
// contents of the embedded 0009_key_not_primary_key.sql asset.
func dbMigrations0009_key_not_primary_keySqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0009_key_not_primary_keySql,
		"db/migrations/0009_key_not_primary_key.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0009_key_not_primary_keySql builds the asset for
// 0009_key_not_primary_key.sql.
func dbMigrations0009_key_not_primary_keySql() (*asset, error) {
	bytes, err := dbMigrations0009_key_not_primary_keySqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0009_key_not_primary_key.sql", size: 182, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
var _dbMigrations0010_client_metadata_field_changedSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x0a\x0d\x70\x71\x0c\x71\x55\x48\xce\xc9\x4c\xcd\x2b\x89\xcf\x4c\x01\x92\x99\x25\x95\x5c\xc1\xae\x21\x0a\xb9\xa9\x25\x89\x29\x89\x25\x89\x0a\xb6\x0a\x25\xa9\x15\x25\x1a\x5c\x0a\x40\x90\x55\x9c\x9f\x17\x9f\x54\x9a\x99\x93\x12\x9f\x9f\x94\x95\x9a\x0c\x15\x06\x01\xf5\xa2\xd4\x94\xcc\x22\xa0\x50\x68\x90\x4f\xb1\xba\x0e\x58\xa9\x06\x98\x80\x99\xa4\xa9\x6b\x67\x87\xaa\x4a\x53\x07\x53\x7b\x7c\x69\x51\x26\xd1\xfa\xc1\xda\x81\xa4\x26\x57\xb8\x87\x6b\x90\xab\x02\x1e\x0d\x10\x73\x35\x15\x3c\x83\x15\xfc\x42\x7d\x7c\xac\xb9\x00\x01\x00\x00\xff\xff\xeb\xe6\x9a\x19\x0b\x01\x00\x00")
|
|
|
|
|
|
|
|
|
|
// dbMigrations0010_client_metadata_field_changedSqlBytes returns the
// uncompressed contents of the embedded 0010_client_metadata_field_changed.sql asset.
func dbMigrations0010_client_metadata_field_changedSqlBytes() ([]byte, error) {
	return bindataRead(
		_dbMigrations0010_client_metadata_field_changedSql,
		"db/migrations/0010_client_metadata_field_changed.sql",
	)
}
|
|
|
|
|
|
|
|
|
|
// dbMigrations0010_client_metadata_field_changedSql builds the asset for
// 0010_client_metadata_field_changed.sql.
func dbMigrations0010_client_metadata_field_changedSql() (*asset, error) {
	bytes, err := dbMigrations0010_client_metadata_field_changedSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "db/migrations/0010_client_metadata_field_changed.sql", size: 267, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
|
|
|
|
|
|
|
|
|
|
// Asset loads and returns the asset for the given name.
|
|
|
|
|
// It returns an error if the asset could not be found or
|
|
|
|
|
// could not be loaded.
|
|
|
|
|
func Asset(name string) ([]byte, error) {
|
|
|
|
|
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
|
|
|
|
if f, ok := _bindata[cannonicalName]; ok {
|
|
|
|
|
a, err := f()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
|
|
|
|
}
|
|
|
|
|
return a.bytes, nil
|
|
|
|
|
}
|
|
|
|
|
return nil, fmt.Errorf("Asset %s not found", name)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// MustAsset is like Asset but panics when Asset would return an error.
|
|
|
|
|
// It simplifies safe initialization of global variables.
|
|
|
|
|
func MustAsset(name string) []byte {
|
|
|
|
|
a, err := Asset(name)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic("asset: Asset(" + name + "): " + err.Error())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return a
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// AssetInfo loads and returns the asset info for the given name.
|
|
|
|
|
// It returns an error if the asset could not be found or
|
|
|
|
|
// could not be loaded.
|
|
|
|
|
func AssetInfo(name string) (os.FileInfo, error) {
|
|
|
|
|
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
|
|
|
|
if f, ok := _bindata[cannonicalName]; ok {
|
|
|
|
|
a, err := f()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
|
|
|
|
}
|
|
|
|
|
return a.info, nil
|
|
|
|
|
}
|
|
|
|
|
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// AssetNames returns the names of the assets.
|
|
|
|
|
func AssetNames() []string {
|
|
|
|
|
names := make([]string, 0, len(_bindata))
|
|
|
|
|
for name := range _bindata {
|
|
|
|
|
names = append(names, name)
|
|
|
|
|
}
|
|
|
|
|
return names
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// _bindata is a table, holding each asset generator, mapped to its name.
// Keys are the slash-separated source paths recorded at generation time.
var _bindata = map[string]func() (*asset, error){
	"db/migrations/0001_initial_migration.sql":             dbMigrations0001_initial_migrationSql,
	"db/migrations/0002_dex_admin.sql":                     dbMigrations0002_dex_adminSql,
	"db/migrations/0003_user_created_at.sql":               dbMigrations0003_user_created_atSql,
	"db/migrations/0004_session_nonce.sql":                 dbMigrations0004_session_nonceSql,
	"db/migrations/0005_refresh_token_create.sql":          dbMigrations0005_refresh_token_createSql,
	"db/migrations/0006_user_email_unique.sql":             dbMigrations0006_user_email_uniqueSql,
	"db/migrations/0007_session_scope.sql":                 dbMigrations0007_session_scopeSql,
	"db/migrations/0008_users_active_or_inactive.sql":      dbMigrations0008_users_active_or_inactiveSql,
	"db/migrations/0009_key_not_primary_key.sql":           dbMigrations0009_key_not_primary_keySql,
	"db/migrations/0010_client_metadata_field_changed.sql": dbMigrations0010_client_metadata_field_changedSql,
}
|
|
|
|
|
|
|
|
|
|
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		// Normalize Windows separators, then walk _bintree one path
		// element at a time.
		cannonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(cannonicalName, "/")
		for _, p := range pathList {
			node = node.Children[p]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	// A non-nil Func marks a file node, which has no directory listing.
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	rv := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		rv = append(rv, childName)
	}
	return rv, nil
}
|
|
|
|
|
|
|
|
|
|
// bintree is a node in the embedded-asset directory tree: a file node
// carries a non-nil Func, a directory node carries only Children.
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}
|
|
|
|
|
|
|
|
|
|
// _bintree mirrors the on-disk layout of the embedded assets as a tree
// rooted at the "db" directory; leaves hold the asset generator funcs.
var _bintree = &bintree{nil, map[string]*bintree{
	"db": &bintree{nil, map[string]*bintree{
		"migrations": &bintree{nil, map[string]*bintree{
			"0001_initial_migration.sql":             &bintree{dbMigrations0001_initial_migrationSql, map[string]*bintree{}},
			"0002_dex_admin.sql":                     &bintree{dbMigrations0002_dex_adminSql, map[string]*bintree{}},
			"0003_user_created_at.sql":               &bintree{dbMigrations0003_user_created_atSql, map[string]*bintree{}},
			"0004_session_nonce.sql":                 &bintree{dbMigrations0004_session_nonceSql, map[string]*bintree{}},
			"0005_refresh_token_create.sql":          &bintree{dbMigrations0005_refresh_token_createSql, map[string]*bintree{}},
			"0006_user_email_unique.sql":             &bintree{dbMigrations0006_user_email_uniqueSql, map[string]*bintree{}},
			"0007_session_scope.sql":                 &bintree{dbMigrations0007_session_scopeSql, map[string]*bintree{}},
			"0008_users_active_or_inactive.sql":      &bintree{dbMigrations0008_users_active_or_inactiveSql, map[string]*bintree{}},
			"0009_key_not_primary_key.sql":           &bintree{dbMigrations0009_key_not_primary_keySql, map[string]*bintree{}},
			"0010_client_metadata_field_changed.sql": &bintree{dbMigrations0010_client_metadata_field_changedSql, map[string]*bintree{}},
		}},
	}},
}}
|
|
|
|
|
|
|
|
|
|
// RestoreAsset restores an asset under the given directory
|
|
|
|
|
func RestoreAsset(dir, name string) error {
|
|
|
|
|
data, err := Asset(name)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
info, err := AssetInfo(name)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// RestoreAssets restores an asset under the given directory recursively
|
|
|
|
|
func RestoreAssets(dir, name string) error {
|
|
|
|
|
children, err := AssetDir(name)
|
|
|
|
|
// File
|
|
|
|
|
if err != nil {
|
|
|
|
|
return RestoreAsset(dir, name)
|
|
|
|
|
}
|
|
|
|
|
// Dir
|
|
|
|
|
for _, child := range children {
|
|
|
|
|
err = RestoreAssets(dir, filepath.Join(name, child))
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func _filePath(dir, name string) string {
|
|
|
|
|
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
|
|
|
|
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
|
|
|
|
|
var PostgresMigrations migrate.MigrationSource = &migrate.MemoryMigrationSource{
|
|
|
|
|
Migrations: []*migrate.Migration{
|
|
|
|
|
{
|
|
|
|
|
Id: "0001_initial_migration.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nCREATE TABLE IF NOT EXISTS \"authd_user\" (\n \"id\" text not null primary key,\n \"email\" text,\n \"email_verified\" boolean,\n \"display_name\" text,\n \"admin\" boolean) ;\n\nCREATE TABLE IF NOT EXISTS \"client_identity\" (\n \"id\" text not null primary key,\n \"secret\" bytea,\n \"metadata\" text);\n\nCREATE TABLE IF NOT EXISTS \"connector_config\" (\n \"id\" text not null primary key,\n \"type\" text, \"config\" text) ;\n\nCREATE TABLE IF NOT EXISTS \"key\" (\n \"value\" bytea not null primary key) ;\n\nCREATE TABLE IF NOT EXISTS \"password_info\" (\n \"user_id\" text not null primary key,\n \"password\" text,\n \"password_expires\" bigint) ;\n\nCREATE TABLE IF NOT EXISTS \"session\" (\n \"id\" text not null primary key,\n \"state\" text,\n \"created_at\" bigint,\n \"expires_at\" bigint,\n \"client_id\" text,\n \"client_state\" text,\n \"redirect_url\" text, \"identity\" text,\n \"connector_id\" text,\n \"user_id\" text, \"register\" boolean) ;\n\nCREATE TABLE IF NOT EXISTS \"session_key\" (\n \"key\" text not null primary key,\n \"session_id\" text,\n \"expires_at\" bigint,\n \"stale\" boolean) ;\n\nCREATE TABLE IF NOT EXISTS \"remote_identity_mapping\" (\n \"connector_id\" text not null,\n \"user_id\" text,\n \"remote_id\" text not null,\n primary key (\"connector_id\", \"remote_id\")) ;\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0002_dex_admin.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nALTER TABLE client_identity ADD COLUMN \"dex_admin\" boolean;\n\nUPDATE \"client_identity\" SET \"dex_admin\" = false;\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0003_user_created_at.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nALTER TABLE authd_user ADD COLUMN \"created_at\" bigint;\n\nUPDATE authd_user SET \"created_at\" = 0;\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0004_session_nonce.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nALTER TABLE session ADD COLUMN \"nonce\" text;\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0005_refresh_token_create.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nCREATE TABLE refresh_token (\n id bigint NOT NULL,\n payload_hash bytea,\n user_id text,\n client_id text\n);\n\nCREATE SEQUENCE refresh_token_id_seq\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\nALTER SEQUENCE refresh_token_id_seq OWNED BY refresh_token.id;\n\nALTER TABLE ONLY refresh_token ALTER COLUMN id SET DEFAULT nextval('refresh_token_id_seq'::regclass);\n\nALTER TABLE ONLY refresh_token\n ADD CONSTRAINT refresh_token_pkey PRIMARY KEY (id);\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0006_user_email_unique.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nALTER TABLE ONLY authd_user\n ADD CONSTRAINT authd_user_email_key UNIQUE (email);\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0007_session_scope.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nALTER TABLE session ADD COLUMN \"scope\" text;\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0008_users_active_or_inactive.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nALTER TABLE authd_user ADD COLUMN disabled boolean;\n\nUPDATE authd_user SET \"disabled\" = FALSE;\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0009_key_not_primary_key.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nALTER TABLE key ADD COLUMN tmp_value bytea;\nUPDATE KEY SET tmp_value = value;\nALTER TABLE key DROP COLUMN value;\nALTER TABLE key RENAME COLUMN \"tmp_value\" to \"value\";\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0010_client_metadata_field_changed.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\nUPDATE client_identity\nSET metadata = text(\n json_build_object(\n 'redirectURLs', json(json(metadata)->>'redirectURLs'),\n 'redirect_uris', json(json(metadata)->>'redirectURLs')\n )\n )\nWHERE (json(metadata)->>'redirect_uris') IS NULL;\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
Id: "0011_case_insensitive_emails.sql",
|
|
|
|
|
Up: []string{
|
|
|
|
|
"-- +migrate Up\n\n-- This migration is a fix for a bug that allowed duplicate emails if they used different cases (see #338).\n-- When migrating, dex will not take the liberty of deleting rows for duplicate cases. Instead it will\n-- raise an exception and call for an admin to remove duplicates manually.\n\nCREATE OR REPLACE FUNCTION raise_exp() RETURNS VOID AS $$\nBEGIN\n RAISE EXCEPTION 'Found duplicate emails when using case insensitive comparision, cannot perform migration.';\nEND;\n$$ LANGUAGE plpgsql;\n\nSELECT LOWER(email),\n COUNT(email),\n CASE\n WHEN COUNT(email) > 1 THEN raise_exp()\n ELSE NULL\n END\nFROM authd_user\nGROUP BY LOWER(email);\n\nUPDATE authd_user SET email = LOWER(email);\n",
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|