forked from mystiq/dex
Merge pull request #339 from ericchiang/case_insensitive_emails
db: email comparison should be case insensitive
This commit is contained in:
commit
60b843e78b
14 changed files with 290 additions and 468 deletions
|
@ -53,12 +53,7 @@ func DropMigrationsTable(dbMap *gorp.DbMap) error {
|
||||||
func migrationSource(dbMap *gorp.DbMap) (src migrate.MigrationSource, dialect string, err error) {
|
func migrationSource(dbMap *gorp.DbMap) (src migrate.MigrationSource, dialect string, err error) {
|
||||||
switch dbMap.Dialect.(type) {
|
switch dbMap.Dialect.(type) {
|
||||||
case gorp.PostgresDialect:
|
case gorp.PostgresDialect:
|
||||||
src = &migrate.AssetMigrationSource{
|
return migrations.PostgresMigrations, "postgres", nil
|
||||||
Dir: migrationDir,
|
|
||||||
Asset: migrations.Asset,
|
|
||||||
AssetDir: migrations.AssetDir,
|
|
||||||
}
|
|
||||||
return src, "postgres", nil
|
|
||||||
case gorp.SqliteDialect:
|
case gorp.SqliteDialect:
|
||||||
src = &migrate.MemoryMigrationSource{
|
src = &migrate.MemoryMigrationSource{
|
||||||
Migrations: []*migrate.Migration{
|
Migrations: []*migrate.Migration{
|
||||||
|
|
|
@ -3,10 +3,12 @@ package db
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/go-gorp/gorp"
|
"github.com/go-gorp/gorp"
|
||||||
|
"github.com/kylelemons/godebug/pretty"
|
||||||
)
|
)
|
||||||
|
|
||||||
func initDB(dsn string) *gorp.DbMap {
|
func initDB(dsn string) *gorp.DbMap {
|
||||||
|
@ -14,6 +16,9 @@ func initDB(dsn string) *gorp.DbMap {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Sprintf("error making db connection: %q", err))
|
panic(fmt.Sprintf("error making db connection: %q", err))
|
||||||
}
|
}
|
||||||
|
if _, err := c.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s;", migrationTable)); err != nil {
|
||||||
|
panic(fmt.Sprintf("failed to drop migration table: %v", err))
|
||||||
|
}
|
||||||
if err = c.DropTablesIfExists(); err != nil {
|
if err = c.DropTablesIfExists(); err != nil {
|
||||||
panic(fmt.Sprintf("Unable to drop database tables: %v", err))
|
panic(fmt.Sprintf("Unable to drop database tables: %v", err))
|
||||||
}
|
}
|
||||||
|
@ -119,3 +124,84 @@ func TestMigrateClientMetadata(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMigrationNumber11(t *testing.T) {
|
||||||
|
dsn := os.Getenv("DEX_TEST_DSN")
|
||||||
|
if dsn == "" {
|
||||||
|
t.Skip("Test will not run without DEX_TEST_DSN environment variable.")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
sqlStmt string
|
||||||
|
wantEmails []string
|
||||||
|
wantError bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
sqlStmt: `INSERT INTO authd_user
|
||||||
|
(id, email, email_verified, display_name, admin, created_at)
|
||||||
|
VALUES
|
||||||
|
(1, 'Foo@example.com', TRUE, 'foo', FALSE, extract(epoch from now())),
|
||||||
|
(2, 'Bar@example.com', TRUE, 'foo', FALSE, extract(epoch from now()))
|
||||||
|
;`,
|
||||||
|
wantEmails: []string{"foo@example.com", "bar@example.com"},
|
||||||
|
wantError: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
sqlStmt: `INSERT INTO authd_user
|
||||||
|
(id, email, email_verified, display_name, admin, created_at)
|
||||||
|
VALUES
|
||||||
|
(1, 'Foo@example.com', TRUE, 'foo', FALSE, extract(epoch from now())),
|
||||||
|
(2, 'foo@example.com', TRUE, 'foo', FALSE, extract(epoch from now())),
|
||||||
|
(3, 'bar@example.com', TRUE, 'foo', FALSE, extract(epoch from now()))
|
||||||
|
;`,
|
||||||
|
wantError: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
migrateN := func(dbMap *gorp.DbMap, n int) error {
|
||||||
|
nPerformed, err := MigrateMaxMigrations(dbMap, n)
|
||||||
|
if err == nil && n != nPerformed {
|
||||||
|
err = fmt.Errorf("expected to perform %d migrations, performed %d", n, nPerformed)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, tt := range tests {
|
||||||
|
err := func() error {
|
||||||
|
dbMap := initDB(dsn)
|
||||||
|
|
||||||
|
nMigrations := 10
|
||||||
|
if err := migrateN(dbMap, nMigrations); err != nil {
|
||||||
|
return fmt.Errorf("failed to perform initial migration: %v", err)
|
||||||
|
}
|
||||||
|
if _, err := dbMap.Exec(tt.sqlStmt); err != nil {
|
||||||
|
return fmt.Errorf("failed to insert users: %v", err)
|
||||||
|
}
|
||||||
|
if err := migrateN(dbMap, 1); err != nil {
|
||||||
|
if tt.wantError {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("failed to perform migration: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if tt.wantError {
|
||||||
|
return fmt.Errorf("expected an error when migrating")
|
||||||
|
}
|
||||||
|
|
||||||
|
var gotEmails []string
|
||||||
|
if _, err := dbMap.Select(&gotEmails, `SELECT email FROM authd_user;`); err != nil {
|
||||||
|
return fmt.Errorf("could not get user emails: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(tt.wantEmails)
|
||||||
|
sort.Strings(gotEmails)
|
||||||
|
if diff := pretty.Compare(tt.wantEmails, gotEmails); diff != "" {
|
||||||
|
return fmt.Errorf("wantEmails != gotEmails: %s", diff)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("case %d: %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
22
db/migrations/0011_case_insensitive_emails.sql
Normal file
22
db/migrations/0011_case_insensitive_emails.sql
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
-- +migrate Up
|
||||||
|
|
||||||
|
-- This migration is a fix for a bug that allowed duplicate emails if they used different cases (see #338).
|
||||||
|
-- When migrating, dex will not take the liberty of deleting rows for duplicate cases. Instead it will
|
||||||
|
-- raise an exception and call for an admin to remove duplicates manually.
|
||||||
|
|
||||||
|
CREATE OR REPLACE FUNCTION raise_exp() RETURNS VOID AS $$
|
||||||
|
BEGIN
|
||||||
|
RAISE EXCEPTION 'Found duplicate emails when using case insensitive comparision, cannot perform migration.';
|
||||||
|
END;
|
||||||
|
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
SELECT LOWER(email),
|
||||||
|
COUNT(email),
|
||||||
|
CASE
|
||||||
|
WHEN COUNT(email) > 1 THEN raise_exp()
|
||||||
|
ELSE NULL
|
||||||
|
END
|
||||||
|
FROM authd_user
|
||||||
|
GROUP BY LOWER(email);
|
||||||
|
|
||||||
|
UPDATE authd_user SET email = LOWER(email);
|
|
@ -1,446 +1,76 @@
|
||||||
// Code generated by go-bindata.
|
|
||||||
// sources:
|
|
||||||
// 0001_initial_migration.sql
|
|
||||||
// 0002_dex_admin.sql
|
|
||||||
// 0003_user_created_at.sql
|
|
||||||
// 0004_session_nonce.sql
|
|
||||||
// 0005_refresh_token_create.sql
|
|
||||||
// 0006_user_email_unique.sql
|
|
||||||
// 0007_session_scope.sql
|
|
||||||
// 0008_users_active_or_inactive.sql
|
|
||||||
// 0009_key_not_primary_key.sql
|
|
||||||
// 0010_client_metadata_field_changed.sql
|
|
||||||
// DO NOT EDIT!
|
|
||||||
|
|
||||||
package migrations
|
package migrations
|
||||||
|
|
||||||
import (
|
// THIS FILE WAS GENERATED BY gen.go DO NOT EDIT
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
import "github.com/rubenv/sql-migrate"
|
||||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Read %q: %v", name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
var PostgresMigrations migrate.MigrationSource = &migrate.MemoryMigrationSource{
|
||||||
_, err = io.Copy(&buf, gz)
|
Migrations: []*migrate.Migration{
|
||||||
clErr := gz.Close()
|
{
|
||||||
|
Id: "0001_initial_migration.sql",
|
||||||
if err != nil {
|
Up: []string{
|
||||||
return nil, fmt.Errorf("Read %q: %v", name, err)
|
"-- +migrate Up\nCREATE TABLE IF NOT EXISTS \"authd_user\" (\n \"id\" text not null primary key,\n \"email\" text,\n \"email_verified\" boolean,\n \"display_name\" text,\n \"admin\" boolean) ;\n\nCREATE TABLE IF NOT EXISTS \"client_identity\" (\n \"id\" text not null primary key,\n \"secret\" bytea,\n \"metadata\" text);\n\nCREATE TABLE IF NOT EXISTS \"connector_config\" (\n \"id\" text not null primary key,\n \"type\" text, \"config\" text) ;\n\nCREATE TABLE IF NOT EXISTS \"key\" (\n \"value\" bytea not null primary key) ;\n\nCREATE TABLE IF NOT EXISTS \"password_info\" (\n \"user_id\" text not null primary key,\n \"password\" text,\n \"password_expires\" bigint) ;\n\nCREATE TABLE IF NOT EXISTS \"session\" (\n \"id\" text not null primary key,\n \"state\" text,\n \"created_at\" bigint,\n \"expires_at\" bigint,\n \"client_id\" text,\n \"client_state\" text,\n \"redirect_url\" text, \"identity\" text,\n \"connector_id\" text,\n \"user_id\" text, \"register\" boolean) ;\n\nCREATE TABLE IF NOT EXISTS \"session_key\" (\n \"key\" text not null primary key,\n \"session_id\" text,\n \"expires_at\" bigint,\n \"stale\" boolean) ;\n\nCREATE TABLE IF NOT EXISTS \"remote_identity_mapping\" (\n \"connector_id\" text not null,\n \"user_id\" text,\n \"remote_id\" text not null,\n primary key (\"connector_id\", \"remote_id\")) ;\n",
|
||||||
}
|
},
|
||||||
if clErr != nil {
|
},
|
||||||
return nil, err
|
{
|
||||||
}
|
Id: "0002_dex_admin.sql",
|
||||||
|
Up: []string{
|
||||||
return buf.Bytes(), nil
|
"-- +migrate Up\nALTER TABLE client_identity ADD COLUMN \"dex_admin\" boolean;\n\nUPDATE \"client_identity\" SET \"dex_admin\" = false;\n",
|
||||||
}
|
},
|
||||||
|
},
|
||||||
type asset struct {
|
{
|
||||||
bytes []byte
|
Id: "0003_user_created_at.sql",
|
||||||
info os.FileInfo
|
Up: []string{
|
||||||
}
|
"-- +migrate Up\nALTER TABLE authd_user ADD COLUMN \"created_at\" bigint;\n\nUPDATE authd_user SET \"created_at\" = 0;\n",
|
||||||
|
},
|
||||||
type bindataFileInfo struct {
|
},
|
||||||
name string
|
{
|
||||||
size int64
|
Id: "0004_session_nonce.sql",
|
||||||
mode os.FileMode
|
Up: []string{
|
||||||
modTime time.Time
|
"-- +migrate Up\nALTER TABLE session ADD COLUMN \"nonce\" text;\n",
|
||||||
}
|
},
|
||||||
|
},
|
||||||
func (fi bindataFileInfo) Name() string {
|
{
|
||||||
return fi.name
|
Id: "0005_refresh_token_create.sql",
|
||||||
}
|
Up: []string{
|
||||||
func (fi bindataFileInfo) Size() int64 {
|
"-- +migrate Up\nCREATE TABLE refresh_token (\n id bigint NOT NULL,\n payload_hash bytea,\n user_id text,\n client_id text\n);\n\nCREATE SEQUENCE refresh_token_id_seq\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\nALTER SEQUENCE refresh_token_id_seq OWNED BY refresh_token.id;\n\nALTER TABLE ONLY refresh_token ALTER COLUMN id SET DEFAULT nextval('refresh_token_id_seq'::regclass);\n\nALTER TABLE ONLY refresh_token\n ADD CONSTRAINT refresh_token_pkey PRIMARY KEY (id);\n",
|
||||||
return fi.size
|
},
|
||||||
}
|
},
|
||||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
{
|
||||||
return fi.mode
|
Id: "0006_user_email_unique.sql",
|
||||||
}
|
Up: []string{
|
||||||
func (fi bindataFileInfo) ModTime() time.Time {
|
"-- +migrate Up\nALTER TABLE ONLY authd_user\n ADD CONSTRAINT authd_user_email_key UNIQUE (email);\n",
|
||||||
return fi.modTime
|
},
|
||||||
}
|
},
|
||||||
func (fi bindataFileInfo) IsDir() bool {
|
{
|
||||||
return false
|
Id: "0007_session_scope.sql",
|
||||||
}
|
Up: []string{
|
||||||
func (fi bindataFileInfo) Sys() interface{} {
|
"-- +migrate Up\nALTER TABLE session ADD COLUMN \"scope\" text;\n",
|
||||||
return nil
|
},
|
||||||
}
|
},
|
||||||
|
{
|
||||||
var _dbMigrations0001_initial_migrationSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x9c\x93\xcf\x8a\xdb\x30\x10\xc6\xef\x79\x0a\x91\xd3\x86\x66\x9f\xa0\xa7\x6d\x71\x61\xa1\xb4\xd0\x75\xa1\x37\x31\xb5\x26\xee\x50\xfd\x43\x1a\x6f\xd7\x6f\x5f\xd9\x4e\x14\xc7\xce\xc6\x26\x81\x84\x58\xd6\xfc\xe6\x9b\x6f\x66\x1e\x1f\xc5\x07\x43\x75\x00\x46\xf1\xd3\x6f\x3e\xff\x28\x9e\xca\x42\x94\x4f\x9f\xbe\x16\xe2\xf9\x8b\xf8\xf6\xbd\x14\xc5\xaf\xe7\x97\xf2\x45\x6c\xa1\xe1\x3f\x4a\x36\x11\xc3\x56\x3c\x6c\xc4\xf0\xd9\x92\xda\x0a\xc6\x37\x16\xd6\xa5\x6f\xa3\xb5\xf0\x81\x0c\x84\x56\xfc\xc5\x76\x9f\xaf\xa1\x01\xd2\xc3\xcd\xc9\xa1\x7c\xc5\x40\x07\xc2\xc4\xf9\xed\x9c\x46\xb0\xe7\x0b\x8a\xa2\xd7\xd0\x4a\x0b\x06\xa7\xc1\xa0\x0c\xd9\x1c\xb3\x13\x1f\x37\x37\xd5\x57\x9a\xd0\xb2\x24\x95\x7e\x89\xdb\x3b\x4a\x88\x58\x05\xe4\x94\xb1\x65\x84\xf3\xb1\x41\x06\x05\x0c\x03\x63\xb7\x24\xc3\x59\x8b\x15\xbb\x20\xd3\xbf\x03\xd5\x77\xe8\xe0\xd6\x9f\xcc\xe8\x79\x3d\xa5\xcf\xbd\xe4\x41\xc2\x8c\xf3\xbd\x82\x6e\xf0\x58\xcf\xd5\x9c\x8b\x40\x0f\x31\xfe\x73\x41\x49\xb2\x07\x37\x46\x77\x53\x22\x57\xd6\x73\x82\x4c\x1b\x9c\xe1\xf8\xe6\x29\x60\x4c\x4a\xa9\x26\xbb\x5c\x66\xc4\x18\xc9\xd9\x7b\x5a\xcc\x69\x0f\xa6\x3a\x52\xd7\xd3\xa9\x92\xc0\x27\x05\xa3\x09\x1e\xa4\x5d\x7d\x97\x27\x6e\x06\x1c\x5e\x5c\x4d\x16\x50\x25\x60\xc5\xb2\x09\x3a\x37\xf9\x3c\xb4\x13\x50\x1e\xa6\x79\x92\x8b\x0e\xec\x3b\x70\x4d\x91\xbb\xdd\x5d\xbb\x31\x47\x1b\xe5\x64\x6a\xfa\xc7\x55\xeb\x32\x84\xcf\xa5\xdd\x32\x2d\x99\xa2\x71\xbd\xc6\x80\xc6\x31\xe6\xad\x96\x06\xbc\x27\x7b\xb1\x55\x73\x93\xb2\xf0\xf7\xdc\x1a\x75\xe3\x88\x7f\x2f\x70\x54\xb9\x78\xb8\x4c\xb5\x1f\x87\xef\xba\x4a\xfe\x07\x00\x00\xff\xff\xae\xbe\x65\x61\x6c\x05\x00\x00")
|
Id: "0008_users_active_or_inactive.sql",
|
||||||
|
Up: []string{
|
||||||
func dbMigrations0001_initial_migrationSqlBytes() ([]byte, error) {
|
"-- +migrate Up\nALTER TABLE authd_user ADD COLUMN disabled boolean;\n\nUPDATE authd_user SET \"disabled\" = FALSE;\n",
|
||||||
return bindataRead(
|
},
|
||||||
_dbMigrations0001_initial_migrationSql,
|
},
|
||||||
"db/migrations/0001_initial_migration.sql",
|
{
|
||||||
)
|
Id: "0009_key_not_primary_key.sql",
|
||||||
}
|
Up: []string{
|
||||||
|
"-- +migrate Up\nALTER TABLE key ADD COLUMN tmp_value bytea;\nUPDATE KEY SET tmp_value = value;\nALTER TABLE key DROP COLUMN value;\nALTER TABLE key RENAME COLUMN \"tmp_value\" to \"value\";\n",
|
||||||
func dbMigrations0001_initial_migrationSql() (*asset, error) {
|
},
|
||||||
bytes, err := dbMigrations0001_initial_migrationSqlBytes()
|
},
|
||||||
if err != nil {
|
{
|
||||||
return nil, err
|
Id: "0010_client_metadata_field_changed.sql",
|
||||||
}
|
Up: []string{
|
||||||
|
"-- +migrate Up\nUPDATE client_identity\nSET metadata = text(\n json_build_object(\n 'redirectURLs', json(json(metadata)->>'redirectURLs'),\n 'redirect_uris', json(json(metadata)->>'redirectURLs')\n )\n )\nWHERE (json(metadata)->>'redirect_uris') IS NULL;\n",
|
||||||
info := bindataFileInfo{name: "db/migrations/0001_initial_migration.sql", size: 1388, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
},
|
||||||
a := &asset{bytes: bytes, info: info}
|
},
|
||||||
return a, nil
|
{
|
||||||
}
|
Id: "0011_case_insensitive_emails.sql",
|
||||||
|
Up: []string{
|
||||||
var _dbMigrations0002_dex_adminSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\xce\xc9\x4c\xcd\x2b\x89\xcf\x4c\x01\x92\x99\x25\x95\x0a\x8e\x2e\x2e\x0a\xce\xfe\x3e\xa1\xbe\x7e\x0a\x4a\x29\xa9\x15\xf1\x89\x29\xb9\x99\x79\x4a\x0a\x49\xf9\xf9\x39\xa9\x89\x79\xd6\x5c\x5c\xa1\x01\x2e\x8e\x21\xae\x0a\x4a\x68\x1a\x95\x14\x82\x5d\x43\x50\xb4\xd8\x2a\xa4\x25\xe6\x14\xa7\x5a\x73\x01\x02\x00\x00\xff\xff\x89\xb3\xb1\x5d\x7e\x00\x00\x00")
|
"-- +migrate Up\n\n-- This migration is a fix for a bug that allowed duplicate emails if they used different cases (see #338).\n-- When migrating, dex will not take the liberty of deleting rows for duplicate cases. Instead it will\n-- raise an exception and call for an admin to remove duplicates manually.\n\nCREATE OR REPLACE FUNCTION raise_exp() RETURNS VOID AS $$\nBEGIN\n RAISE EXCEPTION 'Found duplicate emails when using case insensitive comparision, cannot perform migration.';\nEND;\n$$ LANGUAGE plpgsql;\n\nSELECT LOWER(email),\n COUNT(email),\n CASE\n WHEN COUNT(email) > 1 THEN raise_exp()\n ELSE NULL\n END\nFROM authd_user\nGROUP BY LOWER(email);\n\nUPDATE authd_user SET email = LOWER(email);\n",
|
||||||
|
},
|
||||||
func dbMigrations0002_dex_adminSqlBytes() ([]byte, error) {
|
},
|
||||||
return bindataRead(
|
},
|
||||||
_dbMigrations0002_dex_adminSql,
|
|
||||||
"db/migrations/0002_dex_admin.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0002_dex_adminSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0002_dex_adminSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0002_dex_admin.sql", size: 126, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0003_user_created_atSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\x52\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\x4a\x2e\x4a\x05\x2a\x4e\x89\x4f\x2c\x51\x52\x48\xca\x4c\xcf\xcc\x2b\xb1\xe6\xe2\x0a\x0d\x70\x71\x0c\x41\xd1\x12\xec\x1a\x82\xaa\xd6\x56\xc1\xc0\x9a\x0b\x10\x00\x00\xff\xff\x28\xce\xe1\xe1\x6f\x00\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0003_user_created_atSqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0003_user_created_atSql,
|
|
||||||
"db/migrations/0003_user_created_at.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0003_user_created_atSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0003_user_created_atSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0003_user_created_at.sql", size: 111, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0004_session_nonceSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4e\x2d\x2e\xce\xcc\xcf\x53\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\xca\xcb\xcf\x4b\x4e\x55\x52\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x77\x11\x16\x8b\x3c\x00\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0004_session_nonceSqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0004_session_nonceSql,
|
|
||||||
"db/migrations/0004_session_nonce.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0004_session_nonceSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0004_session_nonceSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0004_session_nonce.sql", size: 60, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0005_refresh_token_createSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x84\x90\x51\x4f\xc2\x30\x10\xc7\xdf\xf7\x29\xee\x0d\x88\x62\xc2\x2b\x3e\x95\xed\x0c\x8b\x5d\xa7\x5d\x2b\xf2\xb4\x14\x56\xa1\x61\x8e\xb9\x56\xe3\xbe\xbd\x65\x13\x0c\xc4\x68\xdf\xfa\xbb\xfb\xdf\x2f\x77\xe3\x31\x5c\xbd\x9a\x4d\xa3\x9c\x06\x59\x07\x21\x47\x22\x10\x04\x99\x51\x84\x46\xbf\x34\xda\x6e\x73\xb7\xdf\xe9\x0a\x86\x01\xf8\x67\x0a\x58\x99\x8d\xa9\x1c\xb0\x54\x00\x93\x94\x5e\x77\xbc\x56\x6d\xb9\x57\x45\xbe\x55\x76\x0b\xab\xd6\x69\xd5\xf3\x77\xab\x9b\xdc\x87\x9c\xfe\x74\x3d\x59\x97\x46\x57\xee\xc8\x82\xd1\x6d\x70\xb4\x66\xf8\x28\x91\x85\x17\x62\xdf\x99\x5b\xfd\xd6\x65\x33\x41\xb8\x80\x45\x2c\xe6\x30\xe9\x40\xcc\x7c\x36\x41\x26\x60\xb6\xfc\x46\x2c\x85\x24\x66\x4f\x84\x4a\x3c\xfd\xc9\xf3\xcf\x3f\x24\xe1\x1c\x61\xe2\xb5\x84\x0a\xe4\x7f\x5b\x21\x5d\x30\x8c\x0e\xc3\xcf\xaa\x37\xa6\x38\xe5\xfb\x5b\xa5\x8c\x5e\xf4\x40\x5f\x0e\x53\x2a\x13\x76\xb8\x5b\x86\x02\x22\xbc\x23\x92\x0a\xa8\xfc\xea\x1f\xaa\x1c\x0e\x7e\x93\x0e\xa6\xd3\x46\x6f\xd6\xa5\xb2\x76\xf4\xaf\xa6\xdb\x89\x44\x91\x17\xb1\x4c\x70\x12\xfb\x5b\x9c\x0f\xad\x77\xba\x85\x07\x1e\x27\x84\x2f\xe1\x1e\x97\x30\x34\x85\x9f\xfb\x15\x00\x00\xff\xff\xc4\xcc\xa8\xfd\xfa\x01\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0005_refresh_token_createSqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0005_refresh_token_createSql,
|
|
||||||
"db/migrations/0005_refresh_token_create.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0005_refresh_token_createSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0005_refresh_token_createSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0005_refresh_token_create.sql", size: 506, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0006_user_email_uniqueSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\xf0\xf7\xf3\x89\x54\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\xe2\x52\x00\x02\x47\x17\x17\x05\x67\x7f\xbf\xe0\x90\x20\x47\x4f\xbf\x10\x24\xd9\xf8\xd4\xdc\xc4\xcc\x9c\xf8\xec\xd4\x4a\x85\x50\x3f\xcf\xc0\x50\x57\x05\x0d\xb0\x88\xa6\x35\x17\x20\x00\x00\xff\xff\x18\x48\x0d\x30\x63\x00\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0006_user_email_uniqueSqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0006_user_email_uniqueSql,
|
|
||||||
"db/migrations/0006_user_email_unique.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0006_user_email_uniqueSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0006_user_email_uniqueSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0006_user_email_unique.sql", size: 99, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0007_session_scopeSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4e\x2d\x2e\xce\xcc\xcf\x53\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x50\x2a\x4e\xce\x2f\x48\x55\x52\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x79\x7f\x20\x70\x3c\x00\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0007_session_scopeSqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0007_session_scopeSql,
|
|
||||||
"db/migrations/0007_session_scope.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0007_session_scopeSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0007_session_scopeSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0007_session_scope.sql", size: 60, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0008_users_active_or_inactiveSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x2d\xc9\x48\x89\x2f\x2d\x4e\x2d\x52\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x48\xc9\x2c\x4e\x4c\xca\x49\x4d\x51\x48\xca\xcf\xcf\x49\x4d\xcc\xb3\xe6\xe2\x0a\x0d\x70\x71\x0c\x41\x51\x1f\xec\x1a\xa2\xa0\x04\x53\xa9\xa4\x60\xab\xe0\xe6\xe8\x13\xec\x6a\xcd\x05\x08\x00\x00\xff\xff\x06\x75\x98\x47\x6e\x00\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0008_users_active_or_inactiveSqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0008_users_active_or_inactiveSql,
|
|
||||||
"db/migrations/0008_users_active_or_inactive.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0008_users_active_or_inactiveSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0008_users_active_or_inactiveSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0008_users_active_or_inactive.sql", size: 110, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0009_key_not_primary_keySql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\xc8\x4e\xad\x54\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x28\xc9\x2d\x88\x2f\x4b\xcc\x29\x4d\x55\x48\xaa\x2c\x49\x4d\xb4\xe6\x0a\x0d\x70\x71\x0c\x71\x55\xf0\x76\x8d\x54\x08\x76\x0d\x41\x92\xb7\x55\x00\xd3\xd6\x18\xa6\xb9\x04\xf9\x07\xc0\x8c\xc3\xa1\x24\xc8\xd5\xcf\xd1\xd7\x15\xa6\x48\x09\x6e\xa8\x92\x42\x49\xbe\x82\x12\x84\x69\xcd\x05\x08\x00\x00\xff\xff\x58\x6f\x17\x19\xb6\x00\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0009_key_not_primary_keySqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0009_key_not_primary_keySql,
|
|
||||||
"db/migrations/0009_key_not_primary_key.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0009_key_not_primary_keySql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0009_key_not_primary_keySqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0009_key_not_primary_key.sql", size: 182, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _dbMigrations0010_client_metadata_field_changedSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x0a\x0d\x70\x71\x0c\x71\x55\x48\xce\xc9\x4c\xcd\x2b\x89\xcf\x4c\x01\x92\x99\x25\x95\x5c\xc1\xae\x21\x0a\xb9\xa9\x25\x89\x29\x89\x25\x89\x0a\xb6\x0a\x25\xa9\x15\x25\x1a\x5c\x0a\x40\x90\x55\x9c\x9f\x17\x9f\x54\x9a\x99\x93\x12\x9f\x9f\x94\x95\x9a\x0c\x15\x06\x01\xf5\xa2\xd4\x94\xcc\x22\xa0\x50\x68\x90\x4f\xb1\xba\x0e\x58\xa9\x06\x98\x80\x99\xa4\xa9\x6b\x67\x87\xaa\x4a\x53\x07\x53\x7b\x7c\x69\x51\x26\xd1\xfa\xc1\xda\x81\xa4\x26\x57\xb8\x87\x6b\x90\xab\x02\x1e\x0d\x10\x73\x35\x15\x3c\x83\x15\xfc\x42\x7d\x7c\xac\xb9\x00\x01\x00\x00\xff\xff\xeb\xe6\x9a\x19\x0b\x01\x00\x00")
|
|
||||||
|
|
||||||
func dbMigrations0010_client_metadata_field_changedSqlBytes() ([]byte, error) {
|
|
||||||
return bindataRead(
|
|
||||||
_dbMigrations0010_client_metadata_field_changedSql,
|
|
||||||
"db/migrations/0010_client_metadata_field_changed.sql",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dbMigrations0010_client_metadata_field_changedSql() (*asset, error) {
|
|
||||||
bytes, err := dbMigrations0010_client_metadata_field_changedSqlBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := bindataFileInfo{name: "db/migrations/0010_client_metadata_field_changed.sql", size: 267, mode: os.FileMode(436), modTime: time.Unix(1, 0)}
|
|
||||||
a := &asset{bytes: bytes, info: info}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Asset loads and returns the asset for the given name.
|
|
||||||
// It returns an error if the asset could not be found or
|
|
||||||
// could not be loaded.
|
|
||||||
func Asset(name string) ([]byte, error) {
|
|
||||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
|
||||||
if f, ok := _bindata[cannonicalName]; ok {
|
|
||||||
a, err := f()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
|
||||||
}
|
|
||||||
return a.bytes, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("Asset %s not found", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustAsset is like Asset but panics when Asset would return an error.
|
|
||||||
// It simplifies safe initialization of global variables.
|
|
||||||
func MustAsset(name string) []byte {
|
|
||||||
a, err := Asset(name)
|
|
||||||
if err != nil {
|
|
||||||
panic("asset: Asset(" + name + "): " + err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
// AssetInfo loads and returns the asset info for the given name.
|
|
||||||
// It returns an error if the asset could not be found or
|
|
||||||
// could not be loaded.
|
|
||||||
func AssetInfo(name string) (os.FileInfo, error) {
|
|
||||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
|
||||||
if f, ok := _bindata[cannonicalName]; ok {
|
|
||||||
a, err := f()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
|
||||||
}
|
|
||||||
return a.info, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AssetNames returns the names of the assets.
|
|
||||||
func AssetNames() []string {
|
|
||||||
names := make([]string, 0, len(_bindata))
|
|
||||||
for name := range _bindata {
|
|
||||||
names = append(names, name)
|
|
||||||
}
|
|
||||||
return names
|
|
||||||
}
|
|
||||||
|
|
||||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
|
||||||
var _bindata = map[string]func() (*asset, error){
|
|
||||||
"db/migrations/0001_initial_migration.sql": dbMigrations0001_initial_migrationSql,
|
|
||||||
"db/migrations/0002_dex_admin.sql": dbMigrations0002_dex_adminSql,
|
|
||||||
"db/migrations/0003_user_created_at.sql": dbMigrations0003_user_created_atSql,
|
|
||||||
"db/migrations/0004_session_nonce.sql": dbMigrations0004_session_nonceSql,
|
|
||||||
"db/migrations/0005_refresh_token_create.sql": dbMigrations0005_refresh_token_createSql,
|
|
||||||
"db/migrations/0006_user_email_unique.sql": dbMigrations0006_user_email_uniqueSql,
|
|
||||||
"db/migrations/0007_session_scope.sql": dbMigrations0007_session_scopeSql,
|
|
||||||
"db/migrations/0008_users_active_or_inactive.sql": dbMigrations0008_users_active_or_inactiveSql,
|
|
||||||
"db/migrations/0009_key_not_primary_key.sql": dbMigrations0009_key_not_primary_keySql,
|
|
||||||
"db/migrations/0010_client_metadata_field_changed.sql": dbMigrations0010_client_metadata_field_changedSql,
|
|
||||||
}
|
|
||||||
|
|
||||||
// AssetDir returns the file names below a certain
|
|
||||||
// directory embedded in the file by go-bindata.
|
|
||||||
// For example if you run go-bindata on data/... and data contains the
|
|
||||||
// following hierarchy:
|
|
||||||
// data/
|
|
||||||
// foo.txt
|
|
||||||
// img/
|
|
||||||
// a.png
|
|
||||||
// b.png
|
|
||||||
// then AssetDir("data") would return []string{"foo.txt", "img"}
|
|
||||||
// AssetDir("data/img") would return []string{"a.png", "b.png"}
|
|
||||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
|
|
||||||
// AssetDir("") will return []string{"data"}.
|
|
||||||
func AssetDir(name string) ([]string, error) {
|
|
||||||
node := _bintree
|
|
||||||
if len(name) != 0 {
|
|
||||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
|
||||||
pathList := strings.Split(cannonicalName, "/")
|
|
||||||
for _, p := range pathList {
|
|
||||||
node = node.Children[p]
|
|
||||||
if node == nil {
|
|
||||||
return nil, fmt.Errorf("Asset %s not found", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if node.Func != nil {
|
|
||||||
return nil, fmt.Errorf("Asset %s not found", name)
|
|
||||||
}
|
|
||||||
rv := make([]string, 0, len(node.Children))
|
|
||||||
for childName := range node.Children {
|
|
||||||
rv = append(rv, childName)
|
|
||||||
}
|
|
||||||
return rv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// bintree is a node in the tree of embedded assets. A node with a
// non-nil Func is a file; a node with a nil Func is a directory whose
// entries live in Children.
type bintree struct {
	Func     func() (*asset, error) // loader returning the file's contents; nil for directories
	Children map[string]*bintree    // child nodes keyed by path segment
}
|
|
||||||
|
|
||||||
var _bintree = &bintree{nil, map[string]*bintree{
|
|
||||||
"db": &bintree{nil, map[string]*bintree{
|
|
||||||
"migrations": &bintree{nil, map[string]*bintree{
|
|
||||||
"0001_initial_migration.sql": &bintree{dbMigrations0001_initial_migrationSql, map[string]*bintree{}},
|
|
||||||
"0002_dex_admin.sql": &bintree{dbMigrations0002_dex_adminSql, map[string]*bintree{}},
|
|
||||||
"0003_user_created_at.sql": &bintree{dbMigrations0003_user_created_atSql, map[string]*bintree{}},
|
|
||||||
"0004_session_nonce.sql": &bintree{dbMigrations0004_session_nonceSql, map[string]*bintree{}},
|
|
||||||
"0005_refresh_token_create.sql": &bintree{dbMigrations0005_refresh_token_createSql, map[string]*bintree{}},
|
|
||||||
"0006_user_email_unique.sql": &bintree{dbMigrations0006_user_email_uniqueSql, map[string]*bintree{}},
|
|
||||||
"0007_session_scope.sql": &bintree{dbMigrations0007_session_scopeSql, map[string]*bintree{}},
|
|
||||||
"0008_users_active_or_inactive.sql": &bintree{dbMigrations0008_users_active_or_inactiveSql, map[string]*bintree{}},
|
|
||||||
"0009_key_not_primary_key.sql": &bintree{dbMigrations0009_key_not_primary_keySql, map[string]*bintree{}},
|
|
||||||
"0010_client_metadata_field_changed.sql": &bintree{dbMigrations0010_client_metadata_field_changedSql, map[string]*bintree{}},
|
|
||||||
}},
|
|
||||||
}},
|
|
||||||
}}
|
|
||||||
|
|
||||||
// RestoreAsset restores an asset under the given directory
|
|
||||||
func RestoreAsset(dir, name string) error {
|
|
||||||
data, err := Asset(name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
info, err := AssetInfo(name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RestoreAssets restores an asset under the given directory recursively
|
|
||||||
func RestoreAssets(dir, name string) error {
|
|
||||||
children, err := AssetDir(name)
|
|
||||||
// File
|
|
||||||
if err != nil {
|
|
||||||
return RestoreAsset(dir, name)
|
|
||||||
}
|
|
||||||
// Dir
|
|
||||||
for _, child := range children {
|
|
||||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func _filePath(dir, name string) string {
|
|
||||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
|
||||||
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
|
|
||||||
}
|
}
|
||||||
|
|
72
db/migrations/gen_assets.go
Normal file
72
db/migrations/gen_assets.go
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
// funcs exposes strconv.Quote to the template so migration file names
// and SQL contents can be emitted as valid Go string literals.
var funcs = template.FuncMap{
	"quote": strconv.Quote,
}
|
||||||
|
|
||||||
|
// tmpl renders the generated assets.go file. It uses {% %} delimiters
// (instead of the default {{ }}) so the emitted Go composite literals'
// braces are not mistaken for template actions, and pipes each
// migration's name and SQL through the "quote" func from funcs.
var tmpl = template.Must(template.New("assets.go").Delims("{%", "%}").Funcs(funcs).Parse(`package migrations

// THIS FILE WAS GENERATED BY gen.go DO NOT EDIT

import "github.com/rubenv/sql-migrate"

var PostgresMigrations migrate.MigrationSource = &migrate.MemoryMigrationSource{
	Migrations: []*migrate.Migration{{% range $i, $m := . %}
		{
			Id: {% $m.Name | quote %},
			Up: []string{
				{% $m.Data | quote %},
			},
		},{% end %}
	},
}
`))
||||||
|
|
||||||
|
// A single postgres migration to be embedded in the generated source.
type migration struct {
	Name string // base file name, e.g. "0001_initial_migration.sql"; becomes the migration Id
	Data string // full SQL text of the migration file
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
log.SetPrefix("gen_assets: ")
|
||||||
|
log.SetFlags(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
files, err := filepath.Glob("*.sql")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("finding sql files: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(files)
|
||||||
|
migrations := make([]migration, len(files))
|
||||||
|
for i, f := range files {
|
||||||
|
data, err := ioutil.ReadFile(f)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("reading file: %v", err)
|
||||||
|
}
|
||||||
|
migrations[i] = migration{f, string(data)}
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile("assets.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("creating file: %v", err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if err := tmpl.Execute(f, migrations); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,6 +1,3 @@
|
||||||
package migrations
|
package migrations
|
||||||
|
|
||||||
// To download go-bindata run `go get -u github.com/jteeuwen/go-bindata/...`
|
//go:generate go run gen_assets.go
|
||||||
|
|
||||||
//go:generate go-bindata -modtime=1 -pkg migrations -o assets.go -ignore \.go$ -prefix "../.." ../../db/migrations
|
|
||||||
//go:generate gofmt -w assets.go
|
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-gorp/gorp"
|
"github.com/go-gorp/gorp"
|
||||||
|
@ -400,7 +401,7 @@ func (r *userRepo) getByEmail(tx repo.Transaction, email string) (user.User, err
|
||||||
qt := r.quote(userTableName)
|
qt := r.quote(userTableName)
|
||||||
ex := r.executor(tx)
|
ex := r.executor(tx)
|
||||||
var um userModel
|
var um userModel
|
||||||
err := ex.SelectOne(&um, fmt.Sprintf("select * from %s where email = $1", qt), email)
|
err := ex.SelectOne(&um, fmt.Sprintf("select * from %s where email = $1", qt), strings.ToLower(email))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == sql.ErrNoRows {
|
if err == sql.ErrNoRows {
|
||||||
|
@ -453,7 +454,7 @@ func newUserModel(u *user.User) (*userModel, error) {
|
||||||
um := userModel{
|
um := userModel{
|
||||||
ID: u.ID,
|
ID: u.ID,
|
||||||
DisplayName: u.DisplayName,
|
DisplayName: u.DisplayName,
|
||||||
Email: u.Email,
|
Email: strings.ToLower(u.Email),
|
||||||
EmailVerified: u.EmailVerified,
|
EmailVerified: u.EmailVerified,
|
||||||
Admin: u.Admin,
|
Admin: u.Admin,
|
||||||
Disabled: u.Disabled,
|
Disabled: u.Disabled,
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -104,6 +105,15 @@ func TestNewUser(t *testing.T) {
|
||||||
},
|
},
|
||||||
err: user.ErrorDuplicateEmail,
|
err: user.ErrorDuplicateEmail,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
user: user.User{
|
||||||
|
ID: "ID-same",
|
||||||
|
Email: "email-1@example.com",
|
||||||
|
DisplayName: "A lower case version of the original email",
|
||||||
|
CreatedAt: now,
|
||||||
|
},
|
||||||
|
err: user.ErrorDuplicateEmail,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
user: user.User{
|
user: user.User{
|
||||||
Email: "AnotherEmail@example.com",
|
Email: "AnotherEmail@example.com",
|
||||||
|
@ -211,6 +221,7 @@ func TestUpdateUser(t *testing.T) {
|
||||||
t.Errorf("case %d: want nil err, got %q", i, err)
|
t.Errorf("case %d: want nil err, got %q", i, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tt.user.Email = strings.ToLower(tt.user.Email)
|
||||||
if diff := pretty.Compare(tt.user, gotUser); diff != "" {
|
if diff := pretty.Compare(tt.user, gotUser); diff != "" {
|
||||||
t.Errorf("case %d: Compare(want, got) = %v", i,
|
t.Errorf("case %d: Compare(want, got) = %v", i,
|
||||||
diff)
|
diff)
|
||||||
|
@ -421,12 +432,19 @@ func findRemoteIdentity(rids []user.RemoteIdentity, rid user.RemoteIdentity) int
|
||||||
|
|
||||||
func TestGetByEmail(t *testing.T) {
|
func TestGetByEmail(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
email string
|
email string
|
||||||
wantErr error
|
wantEmail string
|
||||||
|
wantErr error
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
email: "Email-1@example.com",
|
email: "Email-1@example.com",
|
||||||
wantErr: nil,
|
wantEmail: "email-1@example.com",
|
||||||
|
wantErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
email: "EMAIL-1@example.com", // Emails should be case insensitive.
|
||||||
|
wantEmail: "email-1@example.com",
|
||||||
|
wantErr: nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
email: "NoSuchEmail@example.com",
|
email: "NoSuchEmail@example.com",
|
||||||
|
@ -446,9 +464,10 @@ func TestGetByEmail(t *testing.T) {
|
||||||
|
|
||||||
if gotErr != nil {
|
if gotErr != nil {
|
||||||
t.Errorf("case %d: want nil err:% q", i, gotErr)
|
t.Errorf("case %d: want nil err:% q", i, gotErr)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if tt.email != gotUser.Email {
|
if tt.wantEmail != gotUser.Email {
|
||||||
t.Errorf("case %d: want=%q, got=%q", i, tt.email, gotUser.Email)
|
t.Errorf("case %d: want=%q, got=%q", i, tt.email, gotUser.Email)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -223,7 +223,7 @@ func handleEmailVerifyFunc(verifiedTpl *template.Template, issuer url.URL, keysF
|
||||||
Error: "Invalid Verification Link",
|
Error: "Invalid Verification Link",
|
||||||
Message: "Your email link has expired or has already been verified.",
|
Message: "Your email link has expired or has already been verified.",
|
||||||
}, http.StatusBadRequest)
|
}, http.StatusBadRequest)
|
||||||
case manager.ErrorEVEmailDoesntMatch:
|
case user.ErrorNotFound:
|
||||||
execTemplateWithStatus(w, verifiedTpl, emailVerifiedTemplateData{
|
execTemplateWithStatus(w, verifiedTpl, emailVerifiedTemplateData{
|
||||||
Error: "Invalid Verification Link",
|
Error: "Invalid Verification Link",
|
||||||
Message: "Your email link does not match the email address on file. Perhaps you have a more recent verification link?",
|
Message: "Your email link does not match the email address on file. Perhaps you have a more recent verification link?",
|
||||||
|
|
|
@ -62,7 +62,7 @@ func (h *InvitationHandler) handleGET(w http.ResponseWriter, r *http.Request) {
|
||||||
// never be able to set their passwords.
|
// never be able to set their passwords.
|
||||||
log.Debugf("error attempting to verify email: %v", err)
|
log.Debugf("error attempting to verify email: %v", err)
|
||||||
switch err {
|
switch err {
|
||||||
case manager.ErrorEVEmailDoesntMatch:
|
case user.ErrorNotFound:
|
||||||
writeAPIError(w, http.StatusBadRequest, newAPIError(errorInvalidRequest,
|
writeAPIError(w, http.StatusBadRequest, newAPIError(errorInvalidRequest,
|
||||||
"Your email does not match the email address on file"))
|
"Your email does not match the email address on file"))
|
||||||
return
|
return
|
||||||
|
|
|
@ -100,7 +100,7 @@ func TestSendResetPasswordEmailHandler(t *testing.T) {
|
||||||
|
|
||||||
wantCode: http.StatusOK,
|
wantCode: http.StatusOK,
|
||||||
wantEmailer: &testEmailer{
|
wantEmailer: &testEmailer{
|
||||||
to: str("Email-1@example.com"),
|
to: str("email-1@example.com"),
|
||||||
from: "noreply@example.com",
|
from: "noreply@example.com",
|
||||||
subject: "Reset Your Password",
|
subject: "Reset Your Password",
|
||||||
},
|
},
|
||||||
|
@ -136,7 +136,7 @@ func TestSendResetPasswordEmailHandler(t *testing.T) {
|
||||||
|
|
||||||
wantCode: http.StatusOK,
|
wantCode: http.StatusOK,
|
||||||
wantEmailer: &testEmailer{
|
wantEmailer: &testEmailer{
|
||||||
to: str("Email-1@example.com"),
|
to: str("email-1@example.com"),
|
||||||
from: "noreply@example.com",
|
from: "noreply@example.com",
|
||||||
subject: "Reset Your Password",
|
subject: "Reset Your Password",
|
||||||
},
|
},
|
||||||
|
|
|
@ -13,9 +13,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrorEVEmailDoesntMatch = errors.New("email in EV doesn't match user email")
|
ErrorEmailAlreadyVerified = errors.New("email already verified")
|
||||||
ErrorEmailAlreadyVerified = errors.New("email already verified")
|
|
||||||
|
|
||||||
ErrorPasswordAlreadyChanged = errors.New("password has already been changed")
|
ErrorPasswordAlreadyChanged = errors.New("password has already been changed")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -228,15 +226,15 @@ func (m *UserManager) VerifyEmail(ev EmailVerifiable) (*url.URL, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
usr, err := m.userRepo.Get(tx, ev.UserID())
|
usr, err := m.userRepo.GetByEmail(tx, ev.Email())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
rollback(tx)
|
rollback(tx)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if usr.Email != ev.Email() {
|
if usr.ID != ev.UserID() {
|
||||||
rollback(tx)
|
rollback(tx)
|
||||||
return nil, ErrorEVEmailDoesntMatch
|
return nil, user.ErrorNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
if usr.EmailVerified {
|
if usr.EmailVerified {
|
||||||
|
|
|
@ -311,6 +311,7 @@ func TestVerifyEmail(t *testing.T) {
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("case %d: want err=nil got=%q", i, err)
|
t.Errorf("case %d: want err=nil got=%q", i, err)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if cb.String() != tt.evClaims[user.ClaimEmailVerificationCallback] {
|
if cb.String() != tt.evClaims[user.ClaimEmailVerificationCallback] {
|
||||||
|
|
|
@ -105,6 +105,7 @@ func (u *User) AddToClaims(claims jose.Claims) {
|
||||||
// UserRepo implementations maintain a persistent set of users.
|
// UserRepo implementations maintain a persistent set of users.
|
||||||
// The following invariants must be maintained:
|
// The following invariants must be maintained:
|
||||||
// * Users must have a unique Email and ID
|
// * Users must have a unique Email and ID
|
||||||
|
// * Emails are case insensitive.
|
||||||
// * No other Users may have the same RemoteIdentity as one of the
|
// * No other Users may have the same RemoteIdentity as one of the
|
||||||
// users. (This constraint may be relaxed in the future)
|
// users. (This constraint may be relaxed in the future)
|
||||||
type UserRepo interface {
|
type UserRepo interface {
|
||||||
|
|
Loading…
Reference in a new issue