Merge pull request #577 from coreos/dev-sql
dev branch: add SQL storage implementation
commit e960f2d56b
143 changed files with 237614 additions and 131 deletions
@@ -3,6 +3,13 @@ language: go
go:
  - 1.7

services:
  - postgresql

env:
  - DEX_POSTGRES_DATABASE=postgres DEX_POSTGRES_USER=postgres DEX_POSTGRES_HOST="localhost"

install:
  - go get -u github.com/golang/lint/golint
Documentation/dev-running-db-tests.md (new file, 37 lines)

@@ -0,0 +1,37 @@
# Running database tests

Running database tests locally requires:

* A systemd based Linux distro.
* A recent version of [rkt](https://github.com/coreos/rkt) installed.

The `standup.sh` script in the SQL directory is used to run databases in
containers with systemd daemonizing the process.

```
$ sudo ./storage/sql/standup.sh create postgres
Starting postgres. To view progress run

  journalctl -fu dex-postgres

Running as unit dex-postgres.service.
To run tests export the following environment variables:

  export DEX_POSTGRES_DATABASE=postgres; export DEX_POSTGRES_USER=postgres; export DEX_POSTGRES_PASSWORD=postgres; export DEX_POSTGRES_HOST=172.16.28.3:5432
```

Exporting the variables will cause the database tests to be run, rather than
skipped.

```
$ # sqlite takes forever to compile, be sure to install test dependencies
$ go test -v -i ./storage/sql
$ go test -v ./storage/sql
```

When you're done, tear down the unit using the `standup.sh` script.

```
$ sudo ./storage/sql/standup.sh destroy postgres
```
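The skip-versus-run behavior described above comes from the tests guarding on those environment variables before opening a connection. A minimal sketch of the guard pattern, mirroring what `storage/sql/config_test.go` in this change does (the test name here is illustrative):

```go
package sql

import (
	"os"
	"testing"
)

// TestPostgresGuard is a hypothetical example: when the Postgres environment
// variables are not exported, the test is skipped rather than failed.
func TestPostgresGuard(t *testing.T) {
	if os.Getenv("DEX_POSTGRES_HOST") == "" {
		t.Skip("postgres envs not set, skipping tests")
	}
	// With the variables set, the real tests open a connection using the
	// exported values and run the storage conformance suite against it.
}
```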
Makefile (9 lines changed)

@@ -10,7 +10,6 @@ DOCKER_IMAGE=$(DOCKER_REPO):$(VERSION)

export GOBIN=$(PWD)/bin
export GO15VENDOREXPERIMENT=1
-export CGO_ENABLED:=0

LD_FLAGS="-w -X $(REPO_PATH)/version.Version=$(VERSION)"

@@ -26,10 +25,12 @@ bin/example-app: FORCE
	@go install -ldflags $(LD_FLAGS) $(REPO_PATH)/cmd/example-app

test:
-	@go test $(shell go list ./... | grep -v '/vendor/')
+	@go test -v -i $(shell go list ./... | grep -v '/vendor/')
+	@go test -v $(shell go list ./... | grep -v '/vendor/')

testrace:
-	@CGO_ENABLED=1 go test --race $(shell go list ./... | grep -v '/vendor/')
+	@go test -v -i --race $(shell go list ./... | grep -v '/vendor/')
+	@go test -v --race $(shell go list ./... | grep -v '/vendor/')

vet:
	@go vet $(shell go list ./... | grep -v '/vendor/')

@@ -39,7 +40,7 @@ fmt:

lint:
	@for package in $(shell go list ./... | grep -v '/vendor/' | grep -v 'api/apipb'); do \
-		golint $$package; \
+		golint -set_exit_status $$package; \
	done

server/templates_default.go: $(wildcard web/templates/**)
@@ -12,6 +12,7 @@ import (
	"github.com/coreos/dex/storage"
	"github.com/coreos/dex/storage/kubernetes"
	"github.com/coreos/dex/storage/memory"
+	"github.com/coreos/dex/storage/sql"
)

// Config is the config format for the main application.

@@ -71,6 +72,18 @@ func (s *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
		}
		err = unmarshal(&config)
		s.Config = &config.Config
+	case "sqlite3":
+		var config struct {
+			Config sql.SQLite3 `yaml:"config"`
+		}
+		err = unmarshal(&config)
+		s.Config = &config.Config
+	case "postgres":
+		var config struct {
+			Config sql.Postgres `yaml:"config"`
+		}
+		err = unmarshal(&config)
+		s.Config = &config.Config
	default:
		return fmt.Errorf("unknown storage type %q", storageMeta.Type)
	}
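Both `sql.SQLite3` and `sql.Postgres` decoded above follow the same contract as the other storage configs: they expose an `Open` method that returns a `storage.Storage`. A minimal sketch of opening the SQLite3-backed storage directly, outside the YAML path (the file path is just the one used by the example config in this change):

```go
package main

import (
	"log"

	"github.com/coreos/dex/storage/sql"
)

func main() {
	// Decoding `type: sqlite3` fills this struct from the `config` block;
	// constructing it directly is equivalent.
	cfg := sql.SQLite3{File: "examples/dex.db"}
	s, err := cfg.Open() // runs migrations, returns a storage.Storage
	if err != nil {
		log.Fatalf("failed to open sqlite3 storage: %v", err)
	}
	_ = s // hand the storage to the server as usual
}
```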
examples/.gitignore (new file, 1 line)

@@ -0,0 +1 @@
*.db
@@ -1,7 +1,8 @@
issuer: http://127.0.0.1:5556
storage:
-  # NOTE(ericchiang): This will be replaced by sqlite3 in the future.
-  type: memory
+  type: sqlite3
+  config:
+    file: examples/dex.db

web:
  http: 127.0.0.1:5556
glide.lock (20 lines changed, generated)

@@ -1,8 +1,14 @@
-hash: 2af4a276277d2ab2ba9de9b0fd67ab7d6b70c07f4171a9efb225f30306d6f3eb
-updated: 2016-08-08T11:20:44.300140564-07:00
+hash: 149c717bc83cc279ab6192364776b8c1b6bad8a620ce9d64c56d946276630437
+updated: 2016-09-30T21:01:57.607704513-07:00
imports:
+- name: github.com/cockroachdb/cockroach-go
+  version: 31611c0501c812f437d4861d87d117053967c955
+  subpackages:
+  - crdb
- name: github.com/ericchiang/oidc
  version: 1907f0e61549f9081f26bdf269f11603496c9dee
+- name: github.com/go-sql-driver/mysql
+  version: 0b58b37b664c21f3010e836f1b931e1d0b0b0685
- name: github.com/golang/protobuf
  version: 874264fbbb43f4d91e999fecb4b40143ed611400
  subpackages:

@@ -21,6 +27,12 @@ imports:
  subpackages:
  - diff
  - pretty
+- name: github.com/lib/pq
+  version: 50761b0867bd1d9d069276790bcd4a3bccf2324a
+  subpackages:
+  - oid
+- name: github.com/mattn/go-sqlite3
+  version: 3fb7a0e792edd47bf0cf1e919dfc14e2be412e15
- name: github.com/mitchellh/go-homedir
  version: 756f7b183b7ab78acdbbee5c7f392838ed459dda
- name: github.com/pquerna/cachecontrol

@@ -48,13 +60,13 @@ imports:
- name: google.golang.org/appengine
  version: 267c27e7492265b84fc6719503b14a1e17975d79
  subpackages:
-  - urlfetch
  - internal
-  - internal/urlfetch
  - internal/base
  - internal/datastore
  - internal/log
  - internal/remote_api
+  - internal/urlfetch
+  - urlfetch
- name: gopkg.in/asn1-ber.v1
  version: 4e86f4367175e39f69d9358a5f17b4dda270378d
- name: gopkg.in/ldap.v2
glide.yaml (12 lines changed)

@@ -77,3 +77,15 @@ import:
  - diff
  - pretty
  version: eadb3ce320cbab8393bea5ca17bebac3f78a021b
+
+# SQL drivers
+- package: github.com/mattn/go-sqlite3
+  version: 3fb7a0e792edd47bf0cf1e919dfc14e2be412e15
+- package: github.com/lib/pq
+  version: 50761b0867bd1d9d069276790bcd4a3bccf2324a
+- package: github.com/go-sql-driver/mysql
+  version: 0b58b37b664c21f3010e836f1b931e1d0b0b0685
+- package: github.com/cockroachdb/cockroach-go
+  version: 31611c0501c812f437d4861d87d117053967c955
+  subpackages:
+  - crdb
@@ -264,7 +264,8 @@ func (s *Server) finalizeLogin(identity connector.Identity, authReqID, connector
	}

	updater := func(a storage.AuthRequest) (storage.AuthRequest, error) {
-		a.Claims = &claims
+		a.LoggedIn = true
+		a.Claims = claims
		a.ConnectorID = connectorID
		a.ConnectorData = identity.ConnectorData
		return a, nil

@@ -282,7 +283,7 @@ func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) {
		s.renderError(w, http.StatusInternalServerError, errServerError, "")
		return
	}
-	if authReq.Claims == nil {
+	if !authReq.LoggedIn {
		log.Printf("Auth request does not have an identity for approval")
		s.renderError(w, http.StatusInternalServerError, errServerError, "")
		return

@@ -341,7 +342,7 @@ func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authRe
		ConnectorID: authReq.ConnectorID,
		Nonce:       authReq.Nonce,
		Scopes:      authReq.Scopes,
-		Claims:      *authReq.Claims,
+		Claims:      authReq.Claims,
		Expiry:      s.now().Add(time.Minute * 5),
		RedirectURI: authReq.RedirectURI,
	}

@@ -358,7 +359,7 @@ func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authRe
	}
	q.Set("code", code.ID)
case responseTypeToken:
-	idToken, expiry, err := s.newIDToken(authReq.ClientID, *authReq.Claims, authReq.Scopes, authReq.Nonce)
+	idToken, expiry, err := s.newIDToken(authReq.ClientID, authReq.Claims, authReq.Scopes, authReq.Nonce)
	if err != nil {
		log.Printf("failed to create ID token: %v", err)
		tokenErr(w, errServerError, "", http.StatusInternalServerError)
storage/conformance/conformance.go (new file, 226 lines)

@@ -0,0 +1,226 @@
// +build go1.7

// Package conformance provides conformance tests for storage implementations.
package conformance

import (
	"reflect"
	"testing"
	"time"

	"github.com/coreos/dex/storage"

	"github.com/kylelemons/godebug/pretty"
)

// ensure that values being tested on never expire.
var neverExpire = time.Now().UTC().Add(time.Hour * 24 * 365 * 100)

// StorageFactory is a method for creating a new storage. The returned storage should be initialized
// but shouldn't have any existing data in it.
type StorageFactory func() storage.Storage

// RunTestSuite runs a set of conformance tests against a storage.
func RunTestSuite(t *testing.T, sf StorageFactory) {
	tests := []struct {
		name string
		run  func(t *testing.T, s storage.Storage)
	}{
		{"AuthCodeCRUD", testAuthCodeCRUD},
		{"AuthRequestCRUD", testAuthRequestCRUD},
		{"ClientCRUD", testClientCRUD},
		{"RefreshTokenCRUD", testRefreshTokenCRUD},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.run(t, sf())
		})
	}
}

func mustBeErrNotFound(t *testing.T, kind string, err error) {
	switch {
	case err == nil:
		t.Errorf("deleting non-existent %s should return an error", kind)
	case err != storage.ErrNotFound:
		t.Errorf("deleting %s expected storage.ErrNotFound, got %v", kind, err)
	}
}

func testAuthRequestCRUD(t *testing.T, s storage.Storage) {
	a := storage.AuthRequest{
		ID:                  storage.NewID(),
		ClientID:            "foobar",
		ResponseTypes:       []string{"code"},
		Scopes:              []string{"openid", "email"},
		RedirectURI:         "https://localhost:80/callback",
		Nonce:               "foo",
		State:               "bar",
		ForceApprovalPrompt: true,
		LoggedIn:            true,
		Expiry:              neverExpire,
		ConnectorID:         "ldap",
		ConnectorData:       []byte(`{"some":"data"}`),
		Claims: storage.Claims{
			UserID:        "1",
			Username:      "jane",
			Email:         "jane.doe@example.com",
			EmailVerified: true,
			Groups:        []string{"a", "b"},
		},
	}

	identity := storage.Claims{Email: "foobar"}

	if err := s.CreateAuthRequest(a); err != nil {
		t.Fatalf("failed creating auth request: %v", err)
	}
	if err := s.UpdateAuthRequest(a.ID, func(old storage.AuthRequest) (storage.AuthRequest, error) {
		old.Claims = identity
		old.ConnectorID = "connID"
		return old, nil
	}); err != nil {
		t.Fatalf("failed to update auth request: %v", err)
	}

	got, err := s.GetAuthRequest(a.ID)
	if err != nil {
		t.Fatalf("failed to get auth req: %v", err)
	}
	if !reflect.DeepEqual(got.Claims, identity) {
		t.Fatalf("update failed, wanted identity=%#v got %#v", identity, got.Claims)
	}
}

func testAuthCodeCRUD(t *testing.T, s storage.Storage) {
	a := storage.AuthCode{
		ID:            storage.NewID(),
		ClientID:      "foobar",
		RedirectURI:   "https://localhost:80/callback",
		Nonce:         "foobar",
		Scopes:        []string{"openid", "email"},
		Expiry:        neverExpire,
		ConnectorID:   "ldap",
		ConnectorData: []byte(`{"some":"data"}`),
		Claims: storage.Claims{
			UserID:        "1",
			Username:      "jane",
			Email:         "jane.doe@example.com",
			EmailVerified: true,
			Groups:        []string{"a", "b"},
		},
	}

	if err := s.CreateAuthCode(a); err != nil {
		t.Fatalf("failed creating auth code: %v", err)
	}

	got, err := s.GetAuthCode(a.ID)
	if err != nil {
		t.Fatalf("failed to get auth req: %v", err)
	}
	if a.Expiry.Unix() != got.Expiry.Unix() {
		t.Errorf("auth code expiry did not match want=%s vs got=%s", a.Expiry, got.Expiry)
	}
	got.Expiry = a.Expiry // time fields do not compare well
	if diff := pretty.Compare(a, got); diff != "" {
		t.Errorf("auth code retrieved from storage did not match: %s", diff)
	}

	if err := s.DeleteAuthCode(a.ID); err != nil {
		t.Fatalf("delete auth code: %v", err)
	}

	_, err = s.GetAuthCode(a.ID)
	mustBeErrNotFound(t, "auth code", err)
}

func testClientCRUD(t *testing.T, s storage.Storage) {
	id := storage.NewID()
	c := storage.Client{
		ID:           id,
		Secret:       "foobar",
		RedirectURIs: []string{"foo://bar.com/", "https://auth.example.com"},
		Name:         "dex client",
		LogoURL:      "https://goo.gl/JIyzIC",
	}
	err := s.DeleteClient(id)
	mustBeErrNotFound(t, "client", err)

	if err := s.CreateClient(c); err != nil {
		t.Fatalf("create client: %v", err)
	}

	getAndCompare := func(id string, want storage.Client) {
		gc, err := s.GetClient(id)
		if err != nil {
			t.Errorf("get client: %v", err)
			return
		}
		if diff := pretty.Compare(want, gc); diff != "" {
			t.Errorf("client retrieved from storage did not match: %s", diff)
		}
	}

	getAndCompare(id, c)

	newSecret := "barfoo"
	err = s.UpdateClient(id, func(old storage.Client) (storage.Client, error) {
		old.Secret = newSecret
		return old, nil
	})
	if err != nil {
		t.Errorf("update client: %v", err)
	}
	c.Secret = newSecret
	getAndCompare(id, c)

	if err := s.DeleteClient(id); err != nil {
		t.Fatalf("delete client: %v", err)
	}

	_, err = s.GetClient(id)
	mustBeErrNotFound(t, "client", err)
}

func testRefreshTokenCRUD(t *testing.T, s storage.Storage) {
	id := storage.NewID()
	refresh := storage.RefreshToken{
		RefreshToken: id,
		ClientID:     "client_id",
		ConnectorID:  "client_secret",
		Scopes:       []string{"openid", "email", "profile"},
		Claims: storage.Claims{
			UserID:        "1",
			Username:      "jane",
			Email:         "jane.doe@example.com",
			EmailVerified: true,
			Groups:        []string{"a", "b"},
		},
	}
	if err := s.CreateRefresh(refresh); err != nil {
		t.Fatalf("create refresh token: %v", err)
	}

	getAndCompare := func(id string, want storage.RefreshToken) {
		gr, err := s.GetRefresh(id)
		if err != nil {
			t.Errorf("get refresh: %v", err)
			return
		}
		if diff := pretty.Compare(want, gr); diff != "" {
			t.Errorf("refresh token retrieved from storage did not match: %s", diff)
		}
	}

	getAndCompare(id, refresh)

	if err := s.DeleteRefresh(id); err != nil {
		t.Fatalf("failed to delete refresh request: %v", err)
	}

	if _, err := s.GetRefresh(id); err != storage.ErrNotFound {
		t.Errorf("after deleting refresh expected storage.ErrNotFound, got %v", err)
	}
}
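Any storage backend can be plugged into this suite by supplying a `StorageFactory` that returns a clean, initialized store on each call; the kubernetes, memory, and SQL tests below all follow this shape. A minimal sketch (using the in-memory storage as a stand-in for an implementation under test; the package name is illustrative):

```go
package mydb

import (
	"testing"

	"github.com/coreos/dex/storage"
	"github.com/coreos/dex/storage/conformance"
	"github.com/coreos/dex/storage/memory"
)

func TestStorage(t *testing.T) {
	// memory.New stands in for the implementation under test; a real backend
	// would construct (and clean out) its own store here, since the suite
	// expects no pre-existing data.
	newStorage := func() storage.Storage {
		return memory.New()
	}
	conformance.RunTestSuite(t, newStorage)
}
```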
@@ -4,7 +4,8 @@ import (
	"os"
	"testing"

-	"github.com/coreos/dex/storage/storagetest"
+	"github.com/coreos/dex/storage"
+	"github.com/coreos/dex/storage/conformance"
)

func TestLoadClient(t *testing.T) {

@@ -73,5 +74,8 @@ func TestURLFor(t *testing.T) {

func TestStorage(t *testing.T) {
	client := loadClient(t)
-	storagetest.RunTestSuite(t, client)
+	conformance.RunTestSuite(t, func() storage.Storage {
+		// TODO(ericchiang): Tear down namespaces between each iteration.
+		return client
+	})
}
@@ -118,9 +118,11 @@ type AuthRequest struct {
	// attempts.
	ForceApprovalPrompt bool `json:"forceApprovalPrompt,omitempty"`

+	LoggedIn bool `json:"loggedIn"`
+
	// The identity of the end user. Generally nil until the user authenticates
	// with a backend.
-	Claims *Claims `json:"claims,omitempty"`
+	Claims Claims `json:"claims,omitempty"`
	// The connector used to login the user. Set when the user authenticates.
	ConnectorID   string `json:"connectorID,omitempty"`
	ConnectorData []byte `json:"connectorData,omitempty"`

@@ -145,13 +147,11 @@ func toStorageAuthRequest(req AuthRequest) storage.AuthRequest {
		Nonce:               req.Nonce,
		State:               req.State,
		ForceApprovalPrompt: req.ForceApprovalPrompt,
+		LoggedIn:            req.LoggedIn,
		ConnectorID:         req.ConnectorID,
		ConnectorData:       req.ConnectorData,
		Expiry:              req.Expiry,
-	}
-	if req.Claims != nil {
-		i := toStorageClaims(*req.Claims)
-		a.Claims = &i
+		Claims:              toStorageClaims(req.Claims),
	}
	return a
}

@@ -172,14 +172,12 @@ func (cli *client) fromStorageAuthRequest(a storage.AuthRequest) AuthRequest {
		RedirectURI:         a.RedirectURI,
		Nonce:               a.Nonce,
		State:               a.State,
+		LoggedIn:            a.LoggedIn,
		ForceApprovalPrompt: a.ForceApprovalPrompt,
		ConnectorID:         a.ConnectorID,
		ConnectorData:       a.ConnectorData,
		Expiry:              a.Expiry,
-	}
-	if a.Claims != nil {
-		i := fromStorageClaims(*a.Claims)
-		req.Claims = &i
+		Claims:              fromStorageClaims(a.Claims),
	}
	return req
}
@@ -3,10 +3,9 @@ package memory
import (
	"testing"

-	"github.com/coreos/dex/storage/storagetest"
+	"github.com/coreos/dex/storage/conformance"
)

func TestStorage(t *testing.T) {
-	s := New()
-	storagetest.RunTestSuite(t, s)
+	conformance.RunTestSuite(t, New)
}
storage/sql/config.go (new file, 113 lines)

@@ -0,0 +1,113 @@
package sql

import (
	"database/sql"
	"fmt"
	"net/url"
	"strconv"

	"github.com/coreos/dex/storage"
)

// SQLite3 options for creating an SQL db.
type SQLite3 struct {
	// File to use for the SQLite3 database.
	File string `yaml:"file"`
}

// Open creates a new storage implementation backed by SQLite3.
func (s *SQLite3) Open() (storage.Storage, error) {
	return s.open()
}

func (s *SQLite3) open() (*conn, error) {
	db, err := sql.Open("sqlite3", s.File)
	if err != nil {
		return nil, err
	}
	if s.File == ":memory:" {
		// sqlite3 uses file locks to coordinate concurrent access. The in-memory
		// database doesn't support this, so limit the number of connections to 1.
		db.SetMaxOpenConns(1)
	}
	c := &conn{db, flavorSQLite3}
	if _, err := c.migrate(); err != nil {
		return nil, fmt.Errorf("failed to perform migrations: %v", err)
	}
	return c, nil
}

const (
	sslDisable    = "disable"
	sslRequire    = "require"
	sslVerifyCA   = "verify-ca"
	sslVerifyFull = "verify-full"
)

// PostgresSSL represents SSL options for Postgres databases.
type PostgresSSL struct {
	Mode   string
	CAFile string
	// Files for client auth.
	KeyFile  string
	CertFile string
}

// Postgres options for creating an SQL db.
type Postgres struct {
	Database string
	User     string
	Password string
	Host     string

	SSL PostgresSSL `json:"ssl" yaml:"ssl"`

	ConnectionTimeout int // Seconds
}

// Open creates a new storage implementation backed by Postgres.
func (p *Postgres) Open() (storage.Storage, error) {
	return p.open()
}

func (p *Postgres) open() (*conn, error) {
	v := url.Values{}
	set := func(key, val string) {
		if val != "" {
			v.Set(key, val)
		}
	}
	set("connect_timeout", strconv.Itoa(p.ConnectionTimeout))
	set("sslkey", p.SSL.KeyFile)
	set("sslcert", p.SSL.CertFile)
	set("sslrootcert", p.SSL.CAFile)
	if p.SSL.Mode == "" {
		// Assume the strictest mode if unspecified.
		p.SSL.Mode = sslVerifyFull
	}
	set("sslmode", p.SSL.Mode)

	u := url.URL{
		Scheme:   "postgres",
		Host:     p.Host,
		Path:     "/" + p.Database,
		RawQuery: v.Encode(),
	}

	if p.User != "" {
		if p.Password != "" {
			u.User = url.UserPassword(p.User, p.Password)
		} else {
			u.User = url.User(p.User)
		}
	}
	db, err := sql.Open("postgres", u.String())
	if err != nil {
		return nil, err
	}
	c := &conn{db, flavorPostgres}
	if _, err := c.migrate(); err != nil {
		return nil, fmt.Errorf("failed to perform migrations: %v", err)
	}
	return c, nil
}
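For reference, a hedged sketch of what `(*Postgres).open` assembles from these fields before handing a DSN to the lib/pq driver; the field values are assumptions that match the defaults printed by `standup.sh` later in this change:

```go
package main

import (
	"log"

	"github.com/coreos/dex/storage/sql"
)

func main() {
	// Assumed values; they mirror the environment printed by standup.sh.
	p := sql.Postgres{
		Database:          "postgres",
		User:              "postgres",
		Password:          "postgres",
		Host:              "localhost:5432",
		SSL:               sql.PostgresSSL{Mode: "disable"},
		ConnectionTimeout: 5,
	}
	// open() builds roughly:
	//   postgres://postgres:postgres@localhost:5432/postgres?connect_timeout=5&sslmode=disable
	// then runs the migrations before returning the storage.
	s, err := p.Open()
	if err != nil {
		log.Fatalf("open postgres storage: %v", err)
	}
	_ = s
}
```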
storage/sql/config_test.go (new file, 90 lines)

@@ -0,0 +1,90 @@
package sql

import (
	"fmt"
	"os"
	"runtime"
	"testing"
	"time"

	"github.com/coreos/dex/storage"
	"github.com/coreos/dex/storage/conformance"
)

func withTimeout(t time.Duration, f func()) {
	c := make(chan struct{})
	defer close(c)

	go func() {
		select {
		case <-c:
		case <-time.After(t):
			// Dump a stack trace of the program. Useful for debugging deadlocks.
			buf := make([]byte, 2<<20)
			fmt.Fprintf(os.Stderr, "%s\n", buf[:runtime.Stack(buf, true)])
			panic("test took too long")
		}
	}()

	f()
}

func cleanDB(c *conn) error {
	_, err := c.Exec(`
		delete from client;
		delete from auth_request;
		delete from auth_code;
		delete from refresh_token;
		delete from keys;
	`)
	return err
}

func TestSQLite3(t *testing.T) {
	newStorage := func() storage.Storage {
		// NOTE(ericchiang): In memory means we only get one connection at a time. If we
		// ever write tests that require using multiple connections, for instance to test
		// transactions, we need to move to a file based system.
		s := &SQLite3{":memory:"}
		conn, err := s.open()
		if err != nil {
			t.Fatal(err)
		}
		return conn
	}

	withTimeout(time.Second*10, func() {
		conformance.RunTestSuite(t, newStorage)
	})
}

func TestPostgres(t *testing.T) {
	if os.Getenv("DEX_POSTGRES_HOST") == "" {
		t.Skip("postgres envs not set, skipping tests")
	}
	p := Postgres{
		Database: os.Getenv("DEX_POSTGRES_DATABASE"),
		User:     os.Getenv("DEX_POSTGRES_USER"),
		Password: os.Getenv("DEX_POSTGRES_PASSWORD"),
		Host:     os.Getenv("DEX_POSTGRES_HOST"),
		SSL: PostgresSSL{
			Mode: sslDisable, // Postgres container doesn't support SSL.
		},
		ConnectionTimeout: 5,
	}
	conn, err := p.open()
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	newStorage := func() storage.Storage {
		if err := cleanDB(conn); err != nil {
			t.Fatal(err)
		}
		return conn
	}
	withTimeout(time.Minute*1, func() {
		conformance.RunTestSuite(t, newStorage)
	})
}
storage/sql/crud.go (new file, 487 lines)

@@ -0,0 +1,487 @@
package sql

import (
	"database/sql"
	"database/sql/driver"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/coreos/dex/storage"
)

// TODO(ericchiang): The update, insert, and select queries are all very
// repetitive. Consider creating them programmatically.

// keysRowID is the ID of the only row we expect to populate the "keys" table.
const keysRowID = "keys"

// encoder wraps the underlying value in a JSON marshaler which is automatically
// called by the database/sql package.
//
//	s := []string{"planes", "bears"}
//	err := db.Exec(`insert into t1 (id, things) values (1, $1)`, encoder(s))
//	if err != nil {
//		// handle error
//	}
//
//	var r []byte
//	err = db.QueryRow(`select things from t1 where id = 1;`).Scan(&r)
//	if err != nil {
//		// handle error
//	}
//	fmt.Printf("%s\n", r) // ["planes","bears"]
//
func encoder(i interface{}) driver.Valuer {
	return jsonEncoder{i}
}

// decoder wraps the underlying value in a JSON unmarshaler which can then be passed
// to a database Scan() method.
func decoder(i interface{}) sql.Scanner {
	return jsonDecoder{i}
}

type jsonEncoder struct {
	i interface{}
}

func (j jsonEncoder) Value() (driver.Value, error) {
	b, err := json.Marshal(j.i)
	if err != nil {
		return nil, fmt.Errorf("marshal: %v", err)
	}
	return b, nil
}

type jsonDecoder struct {
	i interface{}
}

func (j jsonDecoder) Scan(dest interface{}) error {
	if dest == nil {
		return errors.New("nil value")
	}
	b, ok := dest.([]byte)
	if !ok {
		return fmt.Errorf("expected []byte got %T", dest)
	}
	if err := json.Unmarshal(b, &j.i); err != nil {
		return fmt.Errorf("unmarshal: %v", err)
	}
	return nil
}

// Abstract conn vs trans.
type querier interface {
	QueryRow(query string, args ...interface{}) *sql.Row
}

// Abstract row vs rows.
type scanner interface {
	Scan(dest ...interface{}) error
}

func (c *conn) CreateAuthRequest(a storage.AuthRequest) error {
	_, err := c.Exec(`
		insert into auth_request (
			id, client_id, response_types, scopes, redirect_uri, nonce, state,
			force_approval_prompt, logged_in,
			claims_user_id, claims_username, claims_email, claims_email_verified,
			claims_groups,
			connector_id, connector_data,
			expiry
		)
		values (
			$1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17
		);
	`,
		a.ID, a.ClientID, encoder(a.ResponseTypes), encoder(a.Scopes), a.RedirectURI, a.Nonce, a.State,
		a.ForceApprovalPrompt, a.LoggedIn,
		a.Claims.UserID, a.Claims.Username, a.Claims.Email, a.Claims.EmailVerified,
		encoder(a.Claims.Groups),
		a.ConnectorID, a.ConnectorData,
		a.Expiry,
	)
	if err != nil {
		return fmt.Errorf("insert auth request: %v", err)
	}
	return nil
}

func (c *conn) UpdateAuthRequest(id string, updater func(a storage.AuthRequest) (storage.AuthRequest, error)) error {
	return c.ExecTx(func(tx *trans) error {
		r, err := getAuthRequest(tx, id)
		if err != nil {
			return err
		}

		a, err := updater(r)
		if err != nil {
			return err
		}
		_, err = tx.Exec(`
			update auth_request
			set
				client_id = $1, response_types = $2, scopes = $3, redirect_uri = $4,
				nonce = $5, state = $6, force_approval_prompt = $7, logged_in = $8,
				claims_user_id = $9, claims_username = $10, claims_email = $11,
				claims_email_verified = $12,
				claims_groups = $13,
				connector_id = $14, connector_data = $15,
				expiry = $16
			where id = $17;
		`,
			a.ClientID, encoder(a.ResponseTypes), encoder(a.Scopes), a.RedirectURI, a.Nonce, a.State,
			a.ForceApprovalPrompt, a.LoggedIn,
			a.Claims.UserID, a.Claims.Username, a.Claims.Email, a.Claims.EmailVerified,
			encoder(a.Claims.Groups),
			a.ConnectorID, a.ConnectorData,
			a.Expiry, a.ID,
		)
		if err != nil {
			return fmt.Errorf("update auth request: %v", err)
		}
		return nil
	})
}

func (c *conn) GetAuthRequest(id string) (storage.AuthRequest, error) {
	return getAuthRequest(c, id)
}

func getAuthRequest(q querier, id string) (a storage.AuthRequest, err error) {
	err = q.QueryRow(`
		select
			id, client_id, response_types, scopes, redirect_uri, nonce, state,
			force_approval_prompt, logged_in,
			claims_user_id, claims_username, claims_email, claims_email_verified,
			claims_groups,
			connector_id, connector_data, expiry
		from auth_request where id = $1;
	`, id).Scan(
		&a.ID, &a.ClientID, decoder(&a.ResponseTypes), decoder(&a.Scopes), &a.RedirectURI, &a.Nonce, &a.State,
		&a.ForceApprovalPrompt, &a.LoggedIn,
		&a.Claims.UserID, &a.Claims.Username, &a.Claims.Email, &a.Claims.EmailVerified,
		decoder(&a.Claims.Groups),
		&a.ConnectorID, &a.ConnectorData, &a.Expiry,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return a, storage.ErrNotFound
		}
		return a, fmt.Errorf("select auth request: %v", err)
	}
	return a, nil
}

func (c *conn) CreateAuthCode(a storage.AuthCode) error {
	_, err := c.Exec(`
		insert into auth_code (
			id, client_id, scopes, nonce, redirect_uri,
			claims_user_id, claims_username,
			claims_email, claims_email_verified, claims_groups,
			connector_id, connector_data,
			expiry
		)
		values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13);
	`,
		a.ID, a.ClientID, encoder(a.Scopes), a.Nonce, a.RedirectURI, a.Claims.UserID,
		a.Claims.Username, a.Claims.Email, a.Claims.EmailVerified, encoder(a.Claims.Groups),
		a.ConnectorID, a.ConnectorData, a.Expiry,
	)
	return err
}

func (c *conn) GetAuthCode(id string) (a storage.AuthCode, err error) {
	err = c.QueryRow(`
		select
			id, client_id, scopes, nonce, redirect_uri,
			claims_user_id, claims_username,
			claims_email, claims_email_verified, claims_groups,
			connector_id, connector_data,
			expiry
		from auth_code where id = $1;
	`, id).Scan(
		&a.ID, &a.ClientID, decoder(&a.Scopes), &a.Nonce, &a.RedirectURI, &a.Claims.UserID,
		&a.Claims.Username, &a.Claims.Email, &a.Claims.EmailVerified, decoder(&a.Claims.Groups),
		&a.ConnectorID, &a.ConnectorData, &a.Expiry,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return a, storage.ErrNotFound
		}
		return a, fmt.Errorf("select auth code: %v", err)
	}
	return a, nil
}

func (c *conn) CreateRefresh(r storage.RefreshToken) error {
	_, err := c.Exec(`
		insert into refresh_token (
			id, client_id, scopes, nonce,
			claims_user_id, claims_username, claims_email, claims_email_verified,
			claims_groups,
			connector_id, connector_data
		)
		values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);
	`,
		r.RefreshToken, r.ClientID, encoder(r.Scopes), r.Nonce,
		r.Claims.UserID, r.Claims.Username, r.Claims.Email, r.Claims.EmailVerified,
		encoder(r.Claims.Groups),
		r.ConnectorID, r.ConnectorData,
	)
	if err != nil {
		return fmt.Errorf("insert refresh_token: %v", err)
	}
	return nil
}

func (c *conn) GetRefresh(id string) (storage.RefreshToken, error) {
	return scanRefresh(c.QueryRow(`
		select
			id, client_id, scopes, nonce,
			claims_user_id, claims_username, claims_email, claims_email_verified,
			claims_groups,
			connector_id, connector_data
		from refresh_token where id = $1;
	`, id))
}

func (c *conn) ListRefreshTokens() ([]storage.RefreshToken, error) {
	rows, err := c.Query(`
		select
			id, client_id, scopes, nonce,
			claims_user_id, claims_username, claims_email, claims_email_verified,
			claims_groups,
			connector_id, connector_data
		from refresh_token;
	`)
	if err != nil {
		return nil, fmt.Errorf("query: %v", err)
	}
	var tokens []storage.RefreshToken
	for rows.Next() {
		r, err := scanRefresh(rows)
		if err != nil {
			return nil, err
		}
		tokens = append(tokens, r)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("scan: %v", err)
	}
	return tokens, nil
}

func scanRefresh(s scanner) (r storage.RefreshToken, err error) {
	err = s.Scan(
		&r.RefreshToken, &r.ClientID, decoder(&r.Scopes), &r.Nonce,
		&r.Claims.UserID, &r.Claims.Username, &r.Claims.Email, &r.Claims.EmailVerified,
		decoder(&r.Claims.Groups),
		&r.ConnectorID, &r.ConnectorData,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return r, storage.ErrNotFound
		}
		return r, fmt.Errorf("scan refresh_token: %v", err)
	}
	return r, nil
}

func (c *conn) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error {
	return c.ExecTx(func(tx *trans) error {
		firstUpdate := false
		// TODO(ericchiang): errors may cause a transaction to be rolled back by the SQL
		// server. Test this, and consider adding a COUNT() command beforehand.
		old, err := getKeys(tx)
		if err != nil {
			if err != storage.ErrNotFound {
				return fmt.Errorf("get keys: %v", err)
			}
			firstUpdate = true
			old = storage.Keys{}
		}

		nk, err := updater(old)
		if err != nil {
			return err
		}

		if firstUpdate {
			_, err = tx.Exec(`
				insert into keys (
					id, verification_keys, signing_key, signing_key_pub, next_rotation
				)
				values ($1, $2, $3, $4, $5);
			`,
				keysRowID, encoder(nk.VerificationKeys), encoder(nk.SigningKey),
				encoder(nk.SigningKeyPub), nk.NextRotation,
			)
			if err != nil {
				return fmt.Errorf("insert: %v", err)
			}
		} else {
			_, err = tx.Exec(`
				update keys
				set
					verification_keys = $1,
					signing_key = $2,
					signing_key_pub = $3,
					next_rotation = $4
				where id = $5;
			`,
				encoder(nk.VerificationKeys), encoder(nk.SigningKey),
				encoder(nk.SigningKeyPub), nk.NextRotation, keysRowID,
			)
			if err != nil {
				return fmt.Errorf("update: %v", err)
			}
		}
		return nil
	})
}

func (c *conn) GetKeys() (keys storage.Keys, err error) {
	return getKeys(c)
}

func getKeys(q querier) (keys storage.Keys, err error) {
	err = q.QueryRow(`
		select
			verification_keys, signing_key, signing_key_pub, next_rotation
		from keys
		where id = $1
	`, keysRowID).Scan(
		decoder(&keys.VerificationKeys), decoder(&keys.SigningKey),
		decoder(&keys.SigningKeyPub), &keys.NextRotation,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return keys, storage.ErrNotFound
		}
		return keys, fmt.Errorf("query keys: %v", err)
	}
	return keys, nil
}

func (c *conn) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error {
	return c.ExecTx(func(tx *trans) error {
		cli, err := getClient(tx, id)
		if err != nil {
			return err
		}
		nc, err := updater(cli)
		if err != nil {
			return err
		}

		_, err = tx.Exec(`
			update client
			set
				secret = $1,
				redirect_uris = $2,
				trusted_peers = $3,
				public = $4,
				name = $5,
				logo_url = $6
			where id = $7;
		`, nc.Secret, encoder(nc.RedirectURIs), encoder(nc.TrustedPeers), nc.Public, nc.Name, nc.LogoURL, id,
		)
		if err != nil {
			return fmt.Errorf("update client: %v", err)
		}
		return nil
	})
}

func (c *conn) CreateClient(cli storage.Client) error {
	_, err := c.Exec(`
		insert into client (
			id, secret, redirect_uris, trusted_peers, public, name, logo_url
		)
		values ($1, $2, $3, $4, $5, $6, $7);
	`,
		cli.ID, cli.Secret, encoder(cli.RedirectURIs), encoder(cli.TrustedPeers),
		cli.Public, cli.Name, cli.LogoURL,
	)
	if err != nil {
		return fmt.Errorf("insert client: %v", err)
	}
	return nil
}

func getClient(q querier, id string) (storage.Client, error) {
	return scanClient(q.QueryRow(`
		select
			id, secret, redirect_uris, trusted_peers, public, name, logo_url
		from client where id = $1;
	`, id))
}

func (c *conn) GetClient(id string) (storage.Client, error) {
	return getClient(c, id)
}

func (c *conn) ListClients() ([]storage.Client, error) {
	rows, err := c.Query(`
		select
			id, secret, redirect_uris, trusted_peers, public, name, logo_url
		from client;
	`)
	if err != nil {
		return nil, err
	}
	var clients []storage.Client
	for rows.Next() {
		cli, err := scanClient(rows)
		if err != nil {
			return nil, err
		}
		clients = append(clients, cli)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return clients, nil
}

func scanClient(s scanner) (cli storage.Client, err error) {
	err = s.Scan(
		&cli.ID, &cli.Secret, decoder(&cli.RedirectURIs), decoder(&cli.TrustedPeers),
		&cli.Public, &cli.Name, &cli.LogoURL,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return cli, storage.ErrNotFound
		}
		return cli, fmt.Errorf("get client: %v", err)
	}
	return cli, nil
}

func (c *conn) DeleteAuthRequest(id string) error { return c.delete("auth_request", id) }
func (c *conn) DeleteAuthCode(id string) error    { return c.delete("auth_code", id) }
func (c *conn) DeleteClient(id string) error      { return c.delete("client", id) }
func (c *conn) DeleteRefresh(id string) error     { return c.delete("refresh_token", id) }

// Do NOT call directly. Does not escape table.
func (c *conn) delete(table, id string) error {
	result, err := c.Exec(`delete from `+table+` where id = $1`, id)
	if err != nil {
		return fmt.Errorf("delete %s: %v", table, err)
	}

	// For now mandate that the driver implements RowsAffected. If we ever need to support
	// a driver that doesn't implement this, we can run this in a transaction with a get beforehand.
	n, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("rows affected: %v", err)
	}
	if n < 1 {
		return storage.ErrNotFound
	}
	return nil
}
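The closure-based `Update*` methods above read the current row inside `ExecTx`, apply the caller's function, and write the result back, so returning an error from the closure aborts the transaction. A small usage sketch (the `rotateSecret` helper is illustrative, not part of the diff):

```go
package example

import "github.com/coreos/dex/storage"

// rotateSecret updates a client's secret through the storage interface. The
// updater should be side-effect free, since the interface allows it to be
// called more than once; returning an error rolls the update back.
func rotateSecret(s storage.Storage, clientID, newSecret string) error {
	return s.UpdateClient(clientID, func(old storage.Client) (storage.Client, error) {
		old.Secret = newSecret
		return old, nil
	})
}
```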
storage/sql/crud_test.go (new file, 55 lines)

@@ -0,0 +1,55 @@
package sql

import (
	"database/sql"
	"reflect"
	"testing"
)

func TestDecoder(t *testing.T) {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`create table foo ( id integer primary key, bar blob );`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`insert into foo ( id, bar ) values (1, ?);`, []byte(`["a", "b"]`)); err != nil {
		t.Fatal(err)
	}
	var got []string
	if err := db.QueryRow(`select bar from foo where id = 1;`).Scan(decoder(&got)); err != nil {
		t.Fatal(err)
	}
	want := []string{"a", "b"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("wanted %q got %q", want, got)
	}
}

func TestEncoder(t *testing.T) {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`create table foo ( id integer primary key, bar blob );`); err != nil {
		t.Fatal(err)
	}
	put := []string{"a", "b"}
	if _, err := db.Exec(`insert into foo ( id, bar ) values (1, ?)`, encoder(put)); err != nil {
		t.Fatal(err)
	}

	var got []byte
	if err := db.QueryRow(`select bar from foo where id = 1;`).Scan(&got); err != nil {
		t.Fatal(err)
	}
	want := []byte(`["a","b"]`)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("wanted %q got %q", want, got)
	}
}
storage/sql/gc.go (new file, 24 lines)

@@ -0,0 +1,24 @@
package sql

import (
	"fmt"
	"time"
)

type gc struct {
	now  func() time.Time
	conn *conn
}

var tablesWithGC = []string{"auth_request", "auth_code"}

func (gc gc) run() error {
	for _, table := range tablesWithGC {
		_, err := gc.conn.Exec(`delete from `+table+` where expiry < $1`, gc.now())
		if err != nil {
			return fmt.Errorf("gc %s: %v", table, err)
		}
		// TODO(ericchiang): when we have levelled logging print how many rows were gc'd
	}
	return nil
}
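Nothing in this file schedules the collector; a caller inside the package would run `gc.run` periodically. An illustrative sketch of such a loop (the `runGCLoop` helper is hypothetical and not part of this commit):

```go
package sql

import (
	"log"
	"time"
)

// runGCLoop is a hypothetical helper that runs the collector every interval
// until the stop channel is closed. gc and conn are unexported, so a loop
// like this would have to live inside package sql.
func runGCLoop(c *conn, interval time.Duration, stop <-chan struct{}) {
	g := gc{now: time.Now, conn: c}
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if err := g.run(); err != nil {
				log.Printf("storage/sql: garbage collection failed: %v", err)
			}
		case <-stop:
			return
		}
	}
}
```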
storage/sql/gc_test.go (new file, 53 lines)

@@ -0,0 +1,53 @@
package sql

import (
	"testing"
	"time"

	"github.com/coreos/dex/storage"
)

func TestGC(t *testing.T) {
	// TODO(ericchiang): Add a GarbageCollect method to the storage interface so
	// we can write conformance tests instead of directly testing each implementation.
	s := &SQLite3{":memory:"}
	conn, err := s.open()
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	clock := time.Now()
	now := func() time.Time { return clock }

	runGC := (gc{now, conn}).run

	a := storage.AuthRequest{
		ID:     storage.NewID(),
		Expiry: now().Add(time.Second),
	}

	if err := conn.CreateAuthRequest(a); err != nil {
		t.Fatal(err)
	}

	if err := runGC(); err != nil {
		t.Errorf("gc failed: %v", err)
	}

	if _, err := conn.GetAuthRequest(a.ID); err != nil {
		t.Errorf("failed to get auth request after gc: %v", err)
	}

	clock = clock.Add(time.Minute)

	if err := runGC(); err != nil {
		t.Errorf("gc failed: %v", err)
	}

	if _, err := conn.GetAuthRequest(a.ID); err == nil {
		t.Errorf("expected error after gc'ing auth request")
	} else if err != storage.ErrNotFound {
		t.Errorf("expected storage.ErrNotFound, got: %v", err)
	}
}
storage/sql/migrate.go (new file, 151 lines)

@@ -0,0 +1,151 @@
package sql

import (
	"database/sql"
	"fmt"
)

func (c *conn) migrate() (int, error) {
	_, err := c.Exec(`
		create table if not exists migrations (
			num integer not null,
			at timestamp not null
		);
	`)
	if err != nil {
		return 0, fmt.Errorf("creating migration table: %v", err)
	}

	i := 0
	done := false
	for {
		err := c.ExecTx(func(tx *trans) error {
			// Within a transaction, perform a single migration.
			var (
				num sql.NullInt64
				n   int
			)
			if err := tx.QueryRow(`select max(num) from migrations;`).Scan(&num); err != nil {
				return fmt.Errorf("select max migration: %v", err)
			}
			if num.Valid {
				n = int(num.Int64)
			}
			if n >= len(migrations) {
				done = true
				return nil
			}

			migrationNum := n + 1
			m := migrations[n]
			if _, err := tx.Exec(m.stmt); err != nil {
				return fmt.Errorf("migration %d failed: %v", migrationNum, err)
			}

			q := `insert into migrations (num, at) values ($1, now());`
			if _, err := tx.Exec(q, migrationNum); err != nil {
				return fmt.Errorf("update migration table: %v", err)
			}
			return nil
		})
		if err != nil {
			return i, err
		}
		if done {
			break
		}
		i++
	}

	return i, nil
}

type migration struct {
	stmt string
	// TODO(ericchiang): consider adding additional fields like "forDrivers"
}

// All SQL flavors share migration strategies.
var migrations = []migration{
	{
		stmt: `
			create table client (
				id text not null primary key,
				secret text not null,
				redirect_uris bytea not null, -- JSON array of strings
				trusted_peers bytea not null, -- JSON array of strings
				public boolean not null,
				name text not null,
				logo_url text not null
			);

			create table auth_request (
				id text not null primary key,
				client_id text not null,
				response_types bytea not null, -- JSON array of strings
				scopes bytea not null, -- JSON array of strings
				redirect_uri text not null,
				nonce text not null,
				state text not null,
				force_approval_prompt boolean not null,

				logged_in boolean not null,

				claims_user_id text not null,
				claims_username text not null,
				claims_email text not null,
				claims_email_verified boolean not null,
				claims_groups bytea not null, -- JSON array of strings

				connector_id text not null,
				connector_data bytea,

				expiry timestamp not null
			);

			create table auth_code (
				id text not null primary key,
				client_id text not null,
				scopes bytea not null, -- JSON array of strings
				nonce text not null,
				redirect_uri text not null,

				claims_user_id text not null,
				claims_username text not null,
				claims_email text not null,
				claims_email_verified boolean not null,
				claims_groups bytea not null, -- JSON array of strings

				connector_id text not null,
				connector_data bytea,

				expiry timestamp not null
			);

			create table refresh_token (
				id text not null primary key,
				client_id text not null,
				scopes bytea not null, -- JSON array of strings
				nonce text not null,

				claims_user_id text not null,
				claims_username text not null,
				claims_email text not null,
				claims_email_verified boolean not null,
				claims_groups bytea not null, -- JSON array of strings

				connector_id text not null,
				connector_data bytea
			);

			-- keys is a weird table because we only ever expect there to be a single row
			create table keys (
				id text not null primary key,
				verification_keys bytea not null, -- JSON array
				signing_key bytea not null, -- JSON object
				signing_key_pub bytea not null, -- JSON object
				next_rotation timestamp not null
			);
		`,
	},
}
storage/sql/migrate_test.go (new file, 25 lines)

@@ -0,0 +1,25 @@
package sql

import (
	"database/sql"
	"testing"
)

func TestMigrate(t *testing.T) {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	c := &conn{db, flavorSQLite3}
	for _, want := range []int{len(migrations), 0} {
		got, err := c.migrate()
		if err != nil {
			t.Fatal(err)
		}
		if got != want {
			t.Errorf("expected %d migrations, got %d", want, got)
		}
	}
}
storage/sql/sql.go (new file, 152 lines)

@@ -0,0 +1,152 @@
// Package sql provides SQL implementations of the storage interface.
package sql

import (
	"database/sql"
	"regexp"

	"github.com/cockroachdb/cockroach-go/crdb"

	// import third party drivers
	_ "github.com/go-sql-driver/mysql"
	_ "github.com/lib/pq"
	_ "github.com/mattn/go-sqlite3"
)

// flavor represents a specific SQL implementation, and is used to translate query strings
// between different drivers. Flavors shouldn't aim to translate all possible SQL statements,
// only the specific queries used by the SQL storages.
type flavor struct {
	queryReplacers []replacer

	// Optional function to create and finish a transaction. This is mainly for
	// cockroachdb support which requires special retry logic provided by their
	// client package.
	//
	// This will be nil for most flavors.
	//
	// See: https://github.com/cockroachdb/docs/blob/63761c2e/_includes/app/txn-sample.go#L41-L44
	executeTx func(db *sql.DB, fn func(*sql.Tx) error) error
}

// A regexp with a replacement string.
type replacer struct {
	re   *regexp.Regexp
	with string
}

// Match postgres query binds. E.g. "$1", "$12", etc.
var bindRegexp = regexp.MustCompile(`\$\d+`)

func matchLiteral(s string) *regexp.Regexp {
	return regexp.MustCompile(`\b` + regexp.QuoteMeta(s) + `\b`)
}

var (
	// The "github.com/lib/pq" driver is the default flavor. All others are
	// translations of this.
	flavorPostgres = flavor{}

	flavorSQLite3 = flavor{
		queryReplacers: []replacer{
			{bindRegexp, "?"},
			// Translate booleans to integers.
			{matchLiteral("true"), "1"},
			{matchLiteral("false"), "0"},
			{matchLiteral("boolean"), "integer"},
			// Translate other types.
			{matchLiteral("bytea"), "blob"},
			// {matchLiteral("timestamp"), "integer"},
			// SQLite doesn't have a "now()" method, replace with "date('now')"
			{regexp.MustCompile(`\bnow\(\)`), "date('now')"},
		},
	}

	// Incomplete.
	flavorMySQL = flavor{
		queryReplacers: []replacer{
			{bindRegexp, "?"},
		},
	}

	// Not tested.
	flavorCockroach = flavor{
		executeTx: crdb.ExecuteTx,
	}
)

func (f flavor) translate(query string) string {
	// TODO(ericchiang): Heavy caching.
	for _, r := range f.queryReplacers {
		query = r.re.ReplaceAllString(query, r.with)
	}
	return query
}

// conn is the main database connection.
type conn struct {
	db     *sql.DB
	flavor flavor
}

func (c *conn) Close() error {
	return c.db.Close()
}

// conn implements the same method signatures as database/sql.DB.

func (c *conn) Exec(query string, args ...interface{}) (sql.Result, error) {
	query = c.flavor.translate(query)
	return c.db.Exec(query, args...)
}

func (c *conn) Query(query string, args ...interface{}) (*sql.Rows, error) {
	query = c.flavor.translate(query)
	return c.db.Query(query, args...)
}

func (c *conn) QueryRow(query string, args ...interface{}) *sql.Row {
	query = c.flavor.translate(query)
	return c.db.QueryRow(query, args...)
}

// ExecTx runs a method which operates on a transaction.
func (c *conn) ExecTx(fn func(tx *trans) error) error {
	if c.flavor.executeTx != nil {
		return c.flavor.executeTx(c.db, func(sqlTx *sql.Tx) error {
			return fn(&trans{sqlTx, c})
		})
	}

	sqlTx, err := c.db.Begin()
	if err != nil {
		return err
	}
	if err := fn(&trans{sqlTx, c}); err != nil {
		sqlTx.Rollback()
		return err
	}
	return sqlTx.Commit()
}

type trans struct {
	tx *sql.Tx
	c  *conn
}

// trans implements the same method signatures as database/sql.Tx.

func (t *trans) Exec(query string, args ...interface{}) (sql.Result, error) {
	query = t.c.flavor.translate(query)
	return t.tx.Exec(query, args...)
}

func (t *trans) Query(query string, args ...interface{}) (*sql.Rows, error) {
	query = t.c.flavor.translate(query)
	return t.tx.Query(query, args...)
}

func (t *trans) QueryRow(query string, args ...interface{}) *sql.Row {
	query = t.c.flavor.translate(query)
	return t.tx.QueryRow(query, args...)
}
storage/sql/sql_test.go (new file, 55 lines)

@@ -0,0 +1,55 @@
package sql

import "testing"

func TestTranslate(t *testing.T) {
	tests := []struct {
		testCase string
		flavor   flavor
		query    string
		exp      string
	}{
		{
			"sqlite3 query bind replacement",
			flavorSQLite3,
			`select foo from bar where foo.zam = $1;`,
			`select foo from bar where foo.zam = ?;`,
		},
		{
			"sqlite3 query bind replacement at end of query",
			flavorSQLite3,
			`select foo from bar where foo.zam = $1`,
			`select foo from bar where foo.zam = ?`,
		},
		{
			"sqlite3 query true",
			flavorSQLite3,
			`select foo from bar where foo.zam = true`,
			`select foo from bar where foo.zam = 1`,
		},
		{
			"sqlite3 query false",
			flavorSQLite3,
			`select foo from bar where foo.zam = false`,
			`select foo from bar where foo.zam = 0`,
		},
		{
			"sqlite3 bytea",
			flavorSQLite3,
			`"connector_data" bytea not null,`,
			`"connector_data" blob not null,`,
		},
		{
			"sqlite3 now",
			flavorSQLite3,
			`now(),`,
			`date('now'),`,
		},
	}

	for _, tc := range tests {
		if got := tc.flavor.translate(tc.query); got != tc.exp {
			t.Errorf("%s: want=%q, got=%q", tc.testCase, tc.exp, got)
		}
	}
}
111
storage/sql/standup.sh
Executable file
111
storage/sql/standup.sh
Executable file
|
@ -0,0 +1,111 @@
#!/bin/bash

if [ "$EUID" -ne 0 ]
	then echo "Please run as root"
	exit
fi

function usage {
	cat << EOF >> /dev/stderr
Usage: sudo ./standup.sh [create|destroy] [postgres|mysql|cockroach]

This is a script for standing up test databases. It uses systemd to daemonize
rkt containers running on a local loopback IP.

The general workflow is to create a daemonized container, use the output to set
the test environment variables, run the tests, then destroy the container.

	sudo ./standup.sh create postgres
	# Copy environment variables and run tests.
	go test -v -i # always install test dependencies
	go test -v
	sudo ./standup.sh destroy postgres

EOF
	exit 2
}

function main {
	if [ "$#" -ne 2 ]; then
		usage
		exit 2
	fi

	case "$1" in
	"create")
		case "$2" in
		"postgres")
			create_postgres;;
		"mysql")
			create_mysql;;
		*)
			usage
			exit 2
			;;
		esac
		;;
	"destroy")
		case "$2" in
		"postgres")
			destroy_postgres;;
		"mysql")
			destroy_mysql;;
		*)
			usage
			exit 2
			;;
		esac
		;;
	*)
		usage
		exit 2
		;;
	esac
}

function wait_for_file {
	while [ ! -f $1 ]; do
		sleep 1
	done
}

function wait_for_container {
	while [ -z "$( rkt list --full | grep $1 )" ]; do
		sleep 1
	done
}

function create_postgres {
	UUID_FILE=/tmp/dex-postgres-uuid
	if [ -f $UUID_FILE ]; then
		echo "postgres database already exists, try ./standup.sh destroy postgres"
		exit 2
	fi

	echo "Starting postgres. To view progress run:"
	echo ""
	echo "  journalctl -fu dex-postgres"
	echo ""
	systemd-run --unit=dex-postgres \
		rkt run --uuid-file-save=$UUID_FILE --insecure-options=image docker://postgres:9.6

	wait_for_file $UUID_FILE

	UUID=$( cat $UUID_FILE )
	wait_for_container $UUID
	HOST=$( rkt list --full | grep "$UUID" | awk '{ print $NF }' | sed -e 's/default:ip4=//g' )
	echo "To run tests export the following environment variables:"
	echo ""
	echo "  export DEX_POSTGRES_DATABASE=postgres; export DEX_POSTGRES_USER=postgres; export DEX_POSTGRES_PASSWORD=postgres; export DEX_POSTGRES_HOST=$HOST:5432"
	echo ""
}

function destroy_postgres {
	UUID_FILE=/tmp/dex-postgres-uuid
	systemctl stop dex-postgres
	rkt rm --uuid-file=$UUID_FILE
	rm $UUID_FILE
}


main $@

@@ -70,28 +70,41 @@ type Storage interface {
	DeleteRefresh(id string) error

	// Update functions are assumed to be performed within a single object transaction.
	//
	// updaters may be called multiple times.
	UpdateClient(id string, updater func(old Client) (Client, error)) error
	UpdateKeys(updater func(old Keys) (Keys, error)) error
	UpdateAuthRequest(id string, updater func(a AuthRequest) (AuthRequest, error)) error

	// TODO(ericchiang): Add a GarbageCollect(now time.Time) method so conformance tests
	// can test implementations.
}
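The updater methods above follow a read-modify-write contract: the storage implementation loads the current object, passes it to the supplied closure, and persists whatever the closure returns, which is why the comment warns that updaters may be called multiple times (for example on a serialization conflict). A minimal sketch of a caller, not taken from the dex codebase (the function name and field values are illustrative):

```go
package example

import "github.com/coreos/dex/storage"

// markLoggedIn is an illustrative updater: it records that the end user has
// authenticated by mutating only the copy handed to the closure.
func markLoggedIn(s storage.Storage, authReqID string, claims storage.Claims, connID string) error {
	return s.UpdateAuthRequest(authReqID, func(a storage.AuthRequest) (storage.AuthRequest, error) {
		a.LoggedIn = true
		a.Claims = claims // assumes the value-typed Claims field introduced in this change
		a.ConnectorID = connID
		return a, nil
	})
}
```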

// Client is an OAuth2 client.
// Client represents an OAuth2 client.
//
// For further reading see:
//   * Trusted peers: https://developers.google.com/identity/protocols/CrossClientAuth
//   * Public clients: https://developers.google.com/api-client-library/python/auth/installed-app
type Client struct {
	ID     string `json:"id" yaml:"id"`
	Secret string `json:"secret" yaml:"secret"`
	// Client ID and secret used to identify the client.
	ID     string `json:"id" yaml:"id"`
	Secret string `json:"secret" yaml:"secret"`

	// A registered set of redirect URIs. When redirecting from dex to the client, the URI
	// requested to redirect to MUST match one of these values, unless the client is "public".
	RedirectURIs []string `json:"redirectURIs" yaml:"redirectURIs"`

	// TrustedPeers are a list of peers which can issue tokens on this client's behalf.
	// TrustedPeers are a list of peers which can issue tokens on this client's behalf using
	// the dynamic "oauth2:server:client_id:(client_id)" scope. If a peer makes such a request,
	// this client's ID will appear as the ID Token's audience.
	//
	// Clients inherently trust themselves.
	TrustedPeers []string `json:"trustedPeers" yaml:"trustedPeers"`

	// Public clients must either use a redirectURL 127.0.0.1:X or "urn:ietf:wg:oauth:2.0:oob"
	Public bool `json:"public" yaml:"public"`

	// Name and LogoURL used when displaying this client to the end user.
	Name    string `json:"name" yaml:"name"`
	LogoURL string `json:"logoURL" yaml:"logoURL"`
}
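The "oauth2:server:client_id:(client_id)" scope named in the TrustedPeers comment is just a string the peer adds to its authorization request; a toy illustration (the client ID is made up):

```go
package main

import "fmt"

func main() {
	// A trusted peer requests an ID token whose audience is another client by
	// asking for the dynamic cross-client scope described above.
	audienceClientID := "example-web-app" // hypothetical client ID
	fmt.Println("oauth2:server:client_id:" + audienceClientID)
}
```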

@@ -109,53 +122,79 @@ type Claims struct {
// AuthRequest represents an OAuth2 client authorization request. It holds the state
// of a single auth flow up to the point that the user authorizes the client.
type AuthRequest struct {
	ID string
	// ID used to identify the authorization request.
	ID string

	// ID of the client requesting authorization from a user.
	ClientID string

	// Values parsed from the initial request. These describe the resources the client is
	// requesting as well as values describing the form of the response.
	ResponseTypes []string
	Scopes        []string
	RedirectURI   string

	Nonce string
	State string
	Nonce string
	State string

	// The client has indicated that the end user must be shown an approval prompt
	// on all requests. The server cannot cache their initial action for subsequent
	// attempts.
	ForceApprovalPrompt bool

	Expiry time.Time

	// Has the user proved their identity through a backing identity provider?
	//
	// If false, the following fields are invalid.
	LoggedIn bool

	// The identity of the end user. Generally nil until the user authenticates
	// with a backend.
	Claims *Claims
	Claims Claims

	// The connector used to login the user and any data the connector wishes to persist.
	// Set when the user authenticates.
	ConnectorID   string
	ConnectorData []byte

	Expiry time.Time
}

// AuthCode represents a code which can be exchanged for an OAuth2 token response.
//
// This value is created once an end user has authorized a client, the server has
// redirected the end user back to the client, but the client hasn't exchanged the
// code for an access_token and id_token.
type AuthCode struct {
	// Actual string returned as the "code" value.
	ID string

	ClientID string
	// The client this code value is valid for. When exchanging the code for a
	// token response, the client must use its client_secret to authenticate.
	ClientID string

	// As part of the OAuth2 spec when a client makes a token request it MUST
	// present the same redirect_uri as the initial redirect. This value is saved
	// to make this check.
	//
	// https://tools.ietf.org/html/rfc6749#section-4.1.3
	RedirectURI string

	ConnectorID   string
	ConnectorData []byte

	// If provided by the client in the initial request, the provider MUST create
	// an ID Token with this nonce in the JWT payload.
	Nonce string

	// Scopes authorized by the end user for the client.
	Scopes []string

	Claims Claims
	// Authentication data provided by an upstream source.
	ConnectorID   string
	ConnectorData []byte
	Claims        Claims

	Expiry time.Time
}
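The RFC 6749 section 4.1.3 note above comes down to an equality check at the token endpoint: the redirect_uri presented with the code must match the one stored when the code was issued. A hedged sketch of that check (the function and error names are invented for illustration):

```go
package example

import (
	"errors"

	"github.com/coreos/dex/storage"
)

var errRedirectMismatch = errors.New("redirect_uri does not match the authorization request")

// verifyCodeRedirectURI is illustrative only: compare the redirect_uri sent to
// the token endpoint against the value saved on the AuthCode.
func verifyCodeRedirectURI(code storage.AuthCode, presented string) error {
	if code.RedirectURI != presented {
		return errRedirectMismatch
	}
	return nil
}
```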

// RefreshToken is an OAuth2 refresh token.
// RefreshToken is an OAuth2 refresh token which allows a client to request new
// tokens on the end user's behalf.
type RefreshToken struct {
	// The actual refresh token.
	RefreshToken string

@@ -163,17 +202,19 @@ type RefreshToken struct {
	// Client this refresh token is valid for.
	ClientID string

	// Authentication data provided by an upstream source.
	ConnectorID   string
	ConnectorData []byte
	Claims        Claims

	// Scopes present in the initial request. Refresh requests may specify a set
	// of scopes different from the initial request when refreshing a token,
	// however those scopes must be encompassed by this set.
	Scopes []string

	// Nonce value supplied during the initial redirect. This is required to be part
	// of the claims of any future id_token generated by the client.
	Nonce string

	Claims Claims
}
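The scope rule in the comment above (a refresh request may narrow, but never widen, the originally granted scopes) reduces to a subset check. A minimal sketch, not taken from dex:

```go
package example

// scopesAllowed reports whether every requested scope is contained in the set
// granted when the refresh token was created.
func scopesAllowed(requested, granted []string) bool {
	allowed := make(map[string]bool, len(granted))
	for _, s := range granted {
		allowed[s] = true
	}
	for _, s := range requested {
		if !allowed[s] {
			return false
		}
	}
	return true
}
```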

// VerificationKey is a rotated signing key which can still be used to verify

@@ -188,6 +229,7 @@ type Keys struct {
	// Key for creating and verifying signatures. These may be nil.
	SigningKey    *jose.JSONWebKey
	SigningKeyPub *jose.JSONWebKey

	// Old signing keys which have been rotated but can still be used to validate
	// existing signatures.
	VerificationKeys []VerificationKey

@@ -1,84 +0,0 @@
// +build go1.7

// Package storagetest provides conformance tests for storage implementations.
package storagetest

import (
	"reflect"
	"testing"
	"time"

	"github.com/coreos/dex/storage"
)

var neverExpire = time.Now().Add(time.Hour * 24 * 365 * 100)

// RunTestSuite runs a set of conformance tests against a storage.
func RunTestSuite(t *testing.T, s storage.Storage) {
	t.Run("UpdateAuthRequest", func(t *testing.T) { testUpdateAuthRequest(t, s) })
	t.Run("CreateRefresh", func(t *testing.T) { testCreateRefresh(t, s) })
}

func testUpdateAuthRequest(t *testing.T, s storage.Storage) {
	a := storage.AuthRequest{
		ID:            storage.NewID(),
		ClientID:      "foobar",
		ResponseTypes: []string{"code"},
		Scopes:        []string{"openid", "email"},
		RedirectURI:   "https://localhost:80/callback",
		Expiry:        neverExpire,
	}

	identity := storage.Claims{Email: "foobar"}

	if err := s.CreateAuthRequest(a); err != nil {
		t.Fatalf("failed creating auth request: %v", err)
	}
	if err := s.UpdateAuthRequest(a.ID, func(old storage.AuthRequest) (storage.AuthRequest, error) {
		old.Claims = &identity
		old.ConnectorID = "connID"
		return old, nil
	}); err != nil {
		t.Fatalf("failed to update auth request: %v", err)
	}

	got, err := s.GetAuthRequest(a.ID)
	if err != nil {
		t.Fatalf("failed to get auth req: %v", err)
	}
	if got.Claims == nil {
		t.Fatalf("no identity in auth request")
	}
	if !reflect.DeepEqual(*got.Claims, identity) {
		t.Fatalf("update failed, wanted identity=%#v got %#v", identity, *got.Claims)
	}
}

func testCreateRefresh(t *testing.T, s storage.Storage) {
	id := storage.NewID()
	refresh := storage.RefreshToken{
		RefreshToken: id,
		ClientID:     "client_id",
		ConnectorID:  "client_secret",
		Scopes:       []string{"openid", "email", "profile"},
	}
	if err := s.CreateRefresh(refresh); err != nil {
		t.Fatalf("create refresh token: %v", err)
	}
	gotRefresh, err := s.GetRefresh(id)
	if err != nil {
		t.Fatalf("get refresh: %v", err)
	}
	if !reflect.DeepEqual(gotRefresh, refresh) {
		t.Errorf("refresh returned did not match expected")
	}

	if err := s.DeleteRefresh(id); err != nil {
		t.Fatalf("failed to delete refresh request: %v", err)
	}

	if _, err := s.GetRefresh(id); err != storage.ErrNotFound {
		t.Errorf("after deleting refresh expected storage.ErrNotFound, got %v", err)
	}

}
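The deleted suite above shows the conformance-test pattern the storage backends share: a backend-specific test opens its storage and hands it to one common set of tests. A sketch of how a backend would have wired it up; the storagetest import path is an assumption for illustration, not code from this commit:

```go
package sql

import (
	"testing"

	"github.com/coreos/dex/storage"
	"github.com/coreos/dex/storage/storagetest" // path assumed for illustration
)

// runConformance sketches the wiring: a backend-specific test opens its
// storage (Postgres, SQLite, memory, ...) and passes it to the shared suite.
func runConformance(t *testing.T, s storage.Storage) {
	storagetest.RunTestSuite(t, s)
}
```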
|
3
vendor/github.com/cockroachdb/cockroach-go/.gitignore
generated
vendored
Normal file
3
vendor/github.com/cockroachdb/cockroach-go/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
*~
|
||||
.#*
|
||||
*.test
|
202
vendor/github.com/cockroachdb/cockroach-go/LICENSE
generated
vendored
Normal file
202
vendor/github.com/cockroachdb/cockroach-go/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
55
vendor/github.com/cockroachdb/cockroach-go/Makefile
generated
vendored
Normal file
55
vendor/github.com/cockroachdb/cockroach-go/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
# Copyright 2016 The Cockroach Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied. See the License for the specific language governing
|
||||
# permissions and limitations under the License. See the AUTHORS file
|
||||
# for names of contributors.
|
||||
#
|
||||
# Author: Spencer Kimball (spencer.kimball@gmail.com)
|
||||
#
|
||||
|
||||
# Cockroach build rules.
|
||||
GO ?= go
|
||||
# Allow setting of go build flags from the command line.
|
||||
GOFLAGS :=
|
||||
|
||||
.PHONY: all
|
||||
all: test check
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
$(GO) test -v -i ./...
|
||||
$(GO) test -v ./...
|
||||
|
||||
.PHONY: deps
|
||||
deps:
|
||||
$(GO) get -d -t ./...
|
||||
|
||||
.PHONY: check
|
||||
check:
|
||||
@echo "checking for \"path\" imports"
|
||||
@! git grep -F '"path"' -- '*.go'
|
||||
@echo "errcheck"
|
||||
@errcheck ./...
|
||||
@echo "vet"
|
||||
@! go tool vet . 2>&1 | \
|
||||
grep -vE '^vet: cannot process directory .git'
|
||||
@echo "vet --shadow"
|
||||
@! go tool vet --shadow . 2>&1 | \
|
||||
grep -vE '(declaration of err shadows|^vet: cannot process directory \.git)'
|
||||
@echo "golint"
|
||||
@! golint ./... | grep -vE '(\.pb\.go)'
|
||||
@echo "varcheck"
|
||||
@varcheck -e ./...
|
||||
@echo "gofmt (simplify)"
|
||||
@! gofmt -s -d -l . 2>&1 | grep -vE '^\.git/'
|
||||
@echo "goimports"
|
||||
@! goimports -l . | grep -vF 'No Exceptions'
|
2
vendor/github.com/cockroachdb/cockroach-go/README.md
generated
vendored
Normal file
2
vendor/github.com/cockroachdb/cockroach-go/README.md
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
# testing
|
||||
Testing helpers for cockroach clients.
|
18
vendor/github.com/cockroachdb/cockroach-go/circle.yml
generated
vendored
Normal file
18
vendor/github.com/cockroachdb/cockroach-go/circle.yml
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
machine:
|
||||
environment:
|
||||
GOROOT: ${HOME}/go
|
||||
PATH: ${PATH}:${HOME}/go/bin
|
||||
post:
|
||||
- sudo rm -rf /usr/local/go
|
||||
- if [ ! -e go1.6.linux-amd64.tar.gz ]; then curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz; fi
|
||||
- tar -C ${HOME} -xzf go1.6.linux-amd64.tar.gz
|
||||
|
||||
dependencies:
|
||||
override:
|
||||
- make deps
|
||||
cache_directories:
|
||||
- ~/go1.6.linux-amd64.tar.gz
|
||||
|
||||
test:
|
||||
override:
|
||||
- make test
|
95
vendor/github.com/cockroachdb/cockroach-go/crdb/tx.go
generated
vendored
Normal file
95
vendor/github.com/cockroachdb/cockroach-go/crdb/tx.go
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
|||
// Copyright 2016 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
//
|
||||
// Author: Andrei Matei (andrei@cockroachlabs.com)
|
||||
|
||||
// Package crdb provides helpers for using CockroachDB in client
|
||||
// applications.
|
||||
package crdb
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
// AmbiguousCommitError represents an error that left a transaction in an
|
||||
// ambiguous state: unclear if it committed or not.
|
||||
type AmbiguousCommitError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// ExecuteTx runs fn inside a transaction and retries it as needed.
|
||||
// On non-retryable failures, the transaction is aborted and rolled
|
||||
// back; on success, the transaction is committed.
|
||||
// There are cases where the state of a transaction is inherently ambiguous: if
|
||||
// we err on RELEASE with a communication error it's unclear if the transaction
|
||||
// has been committed or not (similar to erroring on COMMIT in other databases).
|
||||
// In that case, we return AmbiguousCommitError.
|
||||
//
|
||||
// For more information about CockroachDB's transaction model see
|
||||
// https://cockroachlabs.com/docs/transactions.html.
|
||||
//
|
||||
// NOTE: the supplied exec closure should not have external side
|
||||
// effects beyond changes to the database.
|
||||
func ExecuteTx(db *sql.DB, fn func(*sql.Tx) error) (err error) {
|
||||
// Start a transaction.
|
||||
var tx *sql.Tx
|
||||
tx, err = db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == nil {
|
||||
// Ignore commit errors. The tx has already been committed by RELEASE.
|
||||
_ = tx.Commit()
|
||||
} else {
|
||||
// We always need to execute a Rollback() so sql.DB releases the
|
||||
// connection.
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
}()
|
||||
// Specify that we intend to retry this txn in case of CockroachDB retryable
|
||||
// errors.
|
||||
if _, err = tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
released := false
|
||||
err = fn(tx)
|
||||
if err == nil {
|
||||
// RELEASE acts like COMMIT in CockroachDB. We use it since it gives us an
|
||||
// opportunity to react to retryable errors, whereas tx.Commit() doesn't.
|
||||
released = true
|
||||
if _, err = tx.Exec("RELEASE SAVEPOINT cockroach_restart"); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// We got an error; let's see if it's a retryable one and, if so, restart. We look
|
||||
// for either the standard PG errcode SerializationFailureError:40001 or the Cockroach extension
|
||||
// errcode RetriableError:CR000. The Cockroach extension has been removed server-side, but support
|
||||
// for it has been left here for now to maintain backwards compatibility.
|
||||
pqErr, ok := err.(*pq.Error)
|
||||
if retryable := ok && (pqErr.Code == "CR000" || pqErr.Code == "40001"); !retryable {
|
||||
if released {
|
||||
err = &AmbiguousCommitError{err}
|
||||
}
|
||||
return err
|
||||
}
|
||||
if _, err = tx.Exec("ROLLBACK TO SAVEPOINT cockroach_restart"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
125
vendor/github.com/cockroachdb/cockroach-go/crdb/tx_test.go
generated
vendored
Normal file
125
vendor/github.com/cockroachdb/cockroach-go/crdb/tx_test.go
generated
vendored
Normal file
|
@ -0,0 +1,125 @@
|
|||
// Copyright 2016 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
//
|
||||
// Author: Spencer Kimball (spencer@cockroachlabs.com)
|
||||
|
||||
package crdb
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/cockroachdb/cockroach-go/testserver"
|
||||
)
|
||||
|
||||
// TestExecuteTx verifies transaction retry using the classic
|
||||
// example of write skew in bank account balance transfers.
|
||||
func TestExecuteTx(t *testing.T) {
|
||||
db, stop := testserver.NewDBForTest(t)
|
||||
defer stop()
|
||||
|
||||
initStmt := `
|
||||
CREATE DATABASE d;
|
||||
CREATE TABLE d.t (acct INT PRIMARY KEY, balance INT);
|
||||
INSERT INTO d.t (acct, balance) VALUES (1, 100), (2, 100);
|
||||
`
|
||||
if _, err := db.Exec(initStmt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
type queryI interface {
|
||||
Query(string, ...interface{}) (*sql.Rows, error)
|
||||
}
|
||||
|
||||
getBalances := func(q queryI) (bal1, bal2 int, err error) {
|
||||
var rows *sql.Rows
|
||||
rows, err = q.Query(`SELECT balance FROM d.t WHERE acct IN (1, 2);`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
balances := []*int{&bal1, &bal2}
|
||||
i := 0
|
||||
for ; rows.Next(); i += 1 {
|
||||
if err = rows.Scan(balances[i]); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if i != 2 {
|
||||
err = fmt.Errorf("expected two balances; got %d", i)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
runTxn := func(wg *sync.WaitGroup, iter *int) <-chan error {
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
*iter = 0
|
||||
errCh <- ExecuteTx(db, func(tx *sql.Tx) error {
|
||||
*iter++
|
||||
bal1, bal2, err := getBalances(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If this is the first iteration, wait for the other tx to also read.
|
||||
if *iter == 1 {
|
||||
wg.Done()
|
||||
wg.Wait()
|
||||
}
|
||||
// Now, subtract from one account and give to the other.
|
||||
if bal1 > bal2 {
|
||||
if _, err := tx.Exec(`
|
||||
UPDATE d.t SET balance=balance-100 WHERE acct=1;
|
||||
UPDATE d.t SET balance=balance+100 WHERE acct=2;
|
||||
`); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if _, err := tx.Exec(`
|
||||
UPDATE d.t SET balance=balance+100 WHERE acct=1;
|
||||
UPDATE d.t SET balance=balance-100 WHERE acct=2;
|
||||
`); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
return errCh
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
var iters1, iters2 int
|
||||
txn1Err := runTxn(&wg, &iters1)
|
||||
txn2Err := runTxn(&wg, &iters2)
|
||||
if err := <-txn1Err; err != nil {
|
||||
t.Errorf("expected success in txn1; got %s", err)
|
||||
}
|
||||
if err := <-txn2Err; err != nil {
|
||||
t.Errorf("expected success in txn2; got %s", err)
|
||||
}
|
||||
if iters1+iters2 <= 2 {
|
||||
t.Errorf("expected at least one retry between the competing transactions; "+
|
||||
"got txn1=%d, txn2=%d", iters1, iters2)
|
||||
}
|
||||
bal1, bal2, err := getBalances(db)
|
||||
if err != nil || bal1 != 100 || bal2 != 100 {
|
||||
t.Errorf("expected balances to be restored without error; "+
|
||||
"got acct1=%d, acct2=%d: %s", bal1, bal2, err)
|
||||
}
|
||||
}
|
119
vendor/github.com/cockroachdb/cockroach-go/testserver/binaries.go
generated
vendored
Normal file
119
vendor/github.com/cockroachdb/cockroach-go/testserver/binaries.go
generated
vendored
Normal file
|
@ -0,0 +1,119 @@
|
|||
package testserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
awsBaseURL = "https://s3.amazonaws.com/cockroach/cockroach"
|
||||
latestSuffix = "LATEST"
|
||||
localBinaryPath = "/var/tmp"
|
||||
finishedFileMode = 0555
|
||||
)
|
||||
|
||||
func binaryName() string {
|
||||
return fmt.Sprintf("cockroach.%s-%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
|
||||
func binaryNameWithSha(sha string) string {
|
||||
return fmt.Sprintf("%s.%s", binaryName(), sha)
|
||||
}
|
||||
|
||||
func binaryPath(sha string) string {
|
||||
return filepath.Join(localBinaryPath, binaryNameWithSha(sha))
|
||||
}
|
||||
|
||||
func latestMarkerURL() string {
|
||||
return fmt.Sprintf("%s/%s.%s", awsBaseURL, binaryName(), latestSuffix)
|
||||
}
|
||||
|
||||
func binaryURL(sha string) string {
|
||||
return fmt.Sprintf("%s/%s.%s", awsBaseURL, binaryName(), sha)
|
||||
}
|
||||
|
||||
func findLatestSha() (string, error) {
|
||||
markerURL := latestMarkerURL()
|
||||
marker, err := http.Get(markerURL)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not download %s: %s", markerURL)
|
||||
}
|
||||
if marker.StatusCode == 404 {
|
||||
return "", fmt.Errorf("for 404 from GET %s: make sure OS and ARCH are supported",
|
||||
markerURL)
|
||||
} else if marker.StatusCode != 200 {
|
||||
return "", fmt.Errorf("bad response got GET %s: %d (%s)",
|
||||
markerURL, marker.StatusCode, marker.Status)
|
||||
}
|
||||
|
||||
defer marker.Body.Close()
|
||||
body, err := ioutil.ReadAll(marker.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(body)), nil
|
||||
}
|
||||
|
||||
func downloadFile(url, filePath string) error {
|
||||
output, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0200)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating %s: %s", filePath, "-", err)
|
||||
}
|
||||
defer output.Close()
|
||||
|
||||
log.Printf("downloading %s to %s, this may take some time", url, filePath)
|
||||
|
||||
response, err := http.Get(url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error downloading %s: %s", url, err)
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.StatusCode != 200 {
|
||||
return fmt.Errorf("error downloading %s: %d (%s)", url, response.StatusCode, response.Status)
|
||||
}
|
||||
|
||||
_, err = io.Copy(output, response.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("problem downloading %s to %s: %s", url, filePath, err)
|
||||
}
|
||||
|
||||
// Download was successful, add the rw bits.
|
||||
return os.Chmod(filePath, finishedFileMode)
|
||||
}
|
||||
|
||||
func downloadLatestBinary() (string, error) {
|
||||
sha, err := findLatestSha()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
localFile := binaryPath(sha)
|
||||
for {
|
||||
finfo, err := os.Stat(localFile)
|
||||
if err != nil {
|
||||
// File does not exist: download it.
|
||||
break
|
||||
}
|
||||
// File already present: check mode.
|
||||
if finfo.Mode().Perm() == finishedFileMode {
|
||||
return localFile, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
|
||||
err = downloadFile(binaryURL(sha), localFile)
|
||||
if err != nil {
|
||||
_ = os.Remove(localFile)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return localFile, nil
|
||||
}
|
415
vendor/github.com/cockroachdb/cockroach-go/testserver/testserver.go
generated
vendored
Normal file
415
vendor/github.com/cockroachdb/cockroach-go/testserver/testserver.go
generated
vendored
Normal file
|
@ -0,0 +1,415 @@
|
|||
// Copyright 2016 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
//
|
||||
// Author: Marc Berhault (marc@cockroachlabs.com)
|
||||
|
||||
// Package testserver provides helpers to run a cockroach binary within tests.
|
||||
// It automatically downloads the latest cockroach binary for your platform
|
||||
// (Linux-amd64 and Darwin-amd64 only for now), or attempts to run "cockroach"
|
||||
// from your PATH.
|
||||
//
|
||||
// A normal invocation is (check err every time):
|
||||
// ts, err := testserver.NewTestServer()
|
||||
// err = ts.Start()
|
||||
// defer ts.Stop()
|
||||
// url := ts.PGURL()
|
||||
//
|
||||
// To use, run as follows:
|
||||
// import "github.com/cockroachdb/cockroach-go/testserver"
|
||||
// import "testing"
|
||||
// import "time"
|
||||
//
|
||||
// func TestRunServer(t *testing.T) {
|
||||
// ts, err := testserver.NewTestServer()
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// err := ts.Start()
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// defer ts.Stop()
|
||||
//
|
||||
// url := ts.PGURL()
|
||||
// if url != nil {
|
||||
// t.FatalF("url not found")
|
||||
// }
|
||||
// t.Logf("URL: %s", url.String())
|
||||
//
|
||||
// db, err := sql.Open("postgres", url.String())
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// }
|
||||
package testserver
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var sqlURLRegexp = regexp.MustCompile("sql:\\s+(postgresql:.+)\n")
|
||||
|
||||
const (
|
||||
stateNew = iota
|
||||
stateRunning = iota
|
||||
stateStopped = iota
|
||||
stateFailed = iota
|
||||
|
||||
socketPort = 26257
|
||||
socketFileBase = ".s.PGSQL"
|
||||
)
|
||||
|
||||
// TestServer is a helper to run a real cockroach node.
|
||||
type TestServer struct {
|
||||
mu sync.RWMutex
|
||||
state int
|
||||
baseDir string
|
||||
pgURL *url.URL
|
||||
cmd *exec.Cmd
|
||||
args []string
|
||||
stdout string
|
||||
stderr string
|
||||
stdoutBuf logWriter
|
||||
stderrBuf logWriter
|
||||
}
|
||||
|
||||
// NewDBForTest creates a new CockroachDB TestServer instance and
|
||||
// opens a SQL database connection to it. Returns a sql *DB instance a
|
||||
// shutdown function. The caller is responsible for executing the
|
||||
// returned shutdown function on exit.
|
||||
func NewDBForTest(t *testing.T) (*sql.DB, func()) {
|
||||
return NewDBForTestWithDatabase(t, "")
|
||||
}
|
||||
|
||||
// NewDBForTestWithDatabase creates a new CockroachDB TestServer
|
||||
// instance and opens a SQL database connection to it. If database is
|
||||
// specified, the returned connection will explicitly connect to
|
||||
// it. Returns a sql *DB instance a shutdown function. The caller is
|
||||
// responsible for executing the returned shutdown function on exit.
|
||||
func NewDBForTestWithDatabase(t *testing.T, database string) (*sql.DB, func()) {
|
||||
ts, err := NewTestServer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = ts.Start()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
url := ts.PGURL()
|
||||
if url == nil {
|
||||
t.Fatalf("url not found")
|
||||
}
|
||||
if len(database) > 0 {
|
||||
url.Path = database
|
||||
}
|
||||
|
||||
db, err := sql.Open("postgres", url.String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ts.WaitForInit(db)
|
||||
|
||||
return db, func() {
|
||||
_ = db.Close()
|
||||
ts.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// NewTestServer creates a new TestServer, but does not start it.
|
||||
// The cockroach binary for your OS and ARCH is downloaded automatically.
|
||||
// If the download fails, we attempt just call "cockroach", hoping it is
|
||||
// found in your path.
|
||||
func NewTestServer() (*TestServer, error) {
|
||||
cockroachBinary, err := downloadLatestBinary()
|
||||
if err == nil {
|
||||
log.Printf("Using automatically-downloaded binary: %s", cockroachBinary)
|
||||
} else {
|
||||
log.Printf("Attempting to use cockroach binary from your PATH")
|
||||
cockroachBinary = "cockroach"
|
||||
}
|
||||
|
||||
// Force "/tmp/" so avoid OSX's really long temp directory names
|
||||
// which get us over the socket filename length limit.
|
||||
baseDir, err := ioutil.TempDir("/tmp", "cockroach-testserver")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not create temp directory: %s", err)
|
||||
}
|
||||
|
||||
logDir := filepath.Join(baseDir, "logs")
|
||||
if err := os.MkdirAll(logDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("could not create logs directory: %s: %s", logDir, err)
|
||||
}
|
||||
|
||||
options := url.Values{
|
||||
"host": []string{baseDir},
|
||||
}
|
||||
pgurl := &url.URL{
|
||||
Scheme: "postgres",
|
||||
User: url.User("root"),
|
||||
Host: fmt.Sprintf(":%d", socketPort),
|
||||
RawQuery: options.Encode(),
|
||||
}
|
||||
socketPath := filepath.Join(baseDir, fmt.Sprintf("%s.%d", socketFileBase, socketPort))
|
||||
|
||||
args := []string{
|
||||
cockroachBinary,
|
||||
"start",
|
||||
"--logtostderr",
|
||||
"--insecure",
|
||||
"--port=0",
|
||||
"--http-port=0",
|
||||
"--socket=" + socketPath,
|
||||
"--store=" + baseDir,
|
||||
}
|
||||
|
||||
ts := &TestServer{
|
||||
baseDir: baseDir,
|
||||
pgURL: pgurl,
|
||||
args: args,
|
||||
stdout: filepath.Join(logDir, "cockroach.stdout"),
|
||||
stderr: filepath.Join(logDir, "cockroach.stderr"),
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// Stdout returns the entire contents of the process' stdout.
|
||||
func (ts *TestServer) Stdout() string {
|
||||
return ts.stdoutBuf.String()
|
||||
}
|
||||
|
||||
// Stderr returns the entire contents of the process' stderr.
|
||||
func (ts *TestServer) Stderr() string {
|
||||
return ts.stderrBuf.String()
|
||||
}
|
||||
|
||||
// PGURL returns the postgres connection URL to reach the started
|
||||
// cockroach node.
|
||||
// It loops until the expected unix socket file exists.
|
||||
// This does not timeout, relying instead on test timeouts.
|
||||
func (ts *TestServer) PGURL() *url.URL {
|
||||
socketPath := filepath.Join(ts.baseDir, fmt.Sprintf("%s.%d", socketFileBase, socketPort))
|
||||
for {
|
||||
if _, err := os.Stat(socketPath); err == nil {
|
||||
return ts.pgURL
|
||||
}
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForInit repeatedly looks up the list of databases until
|
||||
// the "system" database exists. It ignores all errors as we are
|
||||
// waiting for the process to start and complete initialization.
|
||||
// This does not timeout, relying instead on test timeouts.
|
||||
func (ts *TestServer) WaitForInit(db *sql.DB) {
|
||||
for {
|
||||
// We issue a query that fails both on connection errors and on the
|
||||
// system database not existing.
|
||||
if _, err := db.Query("SHOW DATABASES"); err == nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
}
|
||||
|
||||
// Start runs the process, returning an error on any problems,
|
||||
// including being unable to start, but not unexpected failure.
|
||||
// It should only be called once in the lifetime of a TestServer object.
|
||||
func (ts *TestServer) Start() error {
|
||||
ts.mu.Lock()
|
||||
if ts.state != stateNew {
|
||||
ts.mu.Unlock()
|
||||
return errors.New("Start() can only be called once")
|
||||
}
|
||||
ts.state = stateRunning
|
||||
ts.mu.Unlock()
|
||||
|
||||
ts.cmd = exec.Command(ts.args[0], ts.args[1:]...)
|
||||
ts.cmd.Env = []string{"COCKROACH_MAX_OFFSET=1ns"}
|
||||
|
||||
if len(ts.stdout) > 0 {
|
||||
wr, err := newFileLogWriter(ts.stdout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to open file %s: %s", ts.stdout, err)
|
||||
}
|
||||
ts.stdoutBuf = wr
|
||||
}
|
||||
ts.cmd.Stdout = ts.stdoutBuf
|
||||
|
||||
if len(ts.stderr) > 0 {
|
||||
wr, err := newFileLogWriter(ts.stderr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to open file %s: %s", ts.stderr, err)
|
||||
}
|
||||
ts.stderrBuf = wr
|
||||
}
|
||||
ts.cmd.Stderr = ts.stderrBuf
|
||||
|
||||
for k, v := range defaultEnv() {
|
||||
ts.cmd.Env = append(ts.cmd.Env, k+"="+v)
|
||||
}
|
||||
|
||||
err := ts.cmd.Start()
|
||||
if ts.cmd.Process != nil {
|
||||
log.Printf("process %d started: %s", ts.cmd.Process.Pid, strings.Join(ts.args, " "))
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf(err.Error())
|
||||
ts.stdoutBuf.Close()
|
||||
ts.stderrBuf.Close()
|
||||
|
||||
ts.mu.Lock()
|
||||
ts.state = stateFailed
|
||||
ts.mu.Unlock()
|
||||
|
||||
return fmt.Errorf("failure starting process: %s", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
ts.cmd.Wait()
|
||||
|
||||
ts.stdoutBuf.Close()
|
||||
ts.stderrBuf.Close()
|
||||
|
||||
ps := ts.cmd.ProcessState
|
||||
sy := ps.Sys().(syscall.WaitStatus)
|
||||
|
||||
log.Printf("Process %d exited with status %d", ps.Pid(), sy.ExitStatus())
|
||||
log.Printf(ps.String())
|
||||
|
||||
ts.mu.Lock()
|
||||
if sy.ExitStatus() == 0 {
|
||||
ts.state = stateStopped
|
||||
} else {
|
||||
ts.state = stateFailed
|
||||
}
|
||||
ts.mu.Unlock()
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop kills the process if it is still running and cleans its directory.
|
||||
// It should only be called once in the lifetime of a TestServer object.
|
||||
// Logs fatal if the process has already failed.
|
||||
func (ts *TestServer) Stop() {
|
||||
ts.mu.RLock()
|
||||
defer ts.mu.RUnlock()
|
||||
|
||||
if ts.state == stateNew {
|
||||
log.Fatal("Stop() called, but Start() was never called")
|
||||
}
|
||||
if ts.state == stateFailed {
|
||||
log.Fatalf("Stop() called, but process exited unexpectedly. Stdout:\n%s\nStderr:\n%s\n",
|
||||
ts.Stdout(), ts.Stderr())
|
||||
return
|
||||
}
|
||||
|
||||
if ts.state != stateStopped {
|
||||
// Only call kill if not running. It could have exited properly.
|
||||
ts.cmd.Process.Kill()
|
||||
}
|
||||
|
||||
// Only cleanup on intentional stops.
|
||||
_ = os.RemoveAll(ts.baseDir)
|
||||
}
|
||||
|
||||
type logWriter interface {
|
||||
Write(p []byte) (n int, err error)
|
||||
String() string
|
||||
Len() int64
|
||||
Close()
|
||||
}
|
||||
|
||||
type fileLogWriter struct {
|
||||
filename string
|
||||
file *os.File
|
||||
}
|
||||
|
||||
func newFileLogWriter(file string) (*fileLogWriter, error) {
|
||||
f, err := os.Create(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fileLogWriter{
|
||||
filename: file,
|
||||
file: f,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (w fileLogWriter) Close() {
|
||||
w.file.Close()
|
||||
}
|
||||
|
||||
func (w fileLogWriter) Write(p []byte) (n int, err error) {
|
||||
return w.file.Write(p)
|
||||
}
|
||||
|
||||
func (w fileLogWriter) String() string {
|
||||
b, err := ioutil.ReadFile(w.filename)
|
||||
if err == nil {
|
||||
return string(b)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w fileLogWriter) Len() int64 {
|
||||
s, err := os.Stat(w.filename)
|
||||
if err == nil {
|
||||
return s.Size()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func defaultEnv() map[string]string {
|
||||
vars := map[string]string{}
|
||||
u, err := user.Current()
|
||||
if err == nil {
|
||||
if _, ok := vars["USER"]; !ok {
|
||||
vars["USER"] = u.Username
|
||||
}
|
||||
if _, ok := vars["UID"]; !ok {
|
||||
vars["UID"] = u.Uid
|
||||
}
|
||||
if _, ok := vars["GID"]; !ok {
|
||||
vars["GID"] = u.Gid
|
||||
}
|
||||
if _, ok := vars["HOME"]; !ok {
|
||||
vars["HOME"] = u.HomeDir
|
||||
}
|
||||
}
|
||||
if _, ok := vars["PATH"]; !ok {
|
||||
vars["PATH"] = os.Getenv("PATH")
|
||||
}
|
||||
return vars
|
||||
}
|
35
vendor/github.com/cockroachdb/cockroach-go/testserver/testserver_test.go
generated
vendored
Normal file
35
vendor/github.com/cockroachdb/cockroach-go/testserver/testserver_test.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
// Copyright 2016 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
//
|
||||
// Author: Marc Berhault (marc@cockroachlabs.com)
|
||||
|
||||
package testserver_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
// Needed for postgres driver test.
|
||||
"github.com/cockroachdb/cockroach-go/testserver"
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
func TestRunServer(t *testing.T) {
|
||||
db, stop := testserver.NewDBForTest(t)
|
||||
defer stop()
|
||||
|
||||
_, err := db.Exec("SELECT 1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
8
vendor/github.com/go-sql-driver/mysql/.gitignore
generated
vendored
Normal file
8
vendor/github.com/go-sql-driver/mysql/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
.DS_Store
|
||||
.DS_Store?
|
||||
._*
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
Icon?
|
||||
ehthumbs.db
|
||||
Thumbs.db
|
12
vendor/github.com/go-sql-driver/mysql/.travis.yml
generated
vendored
Normal file
12
vendor/github.com/go-sql-driver/mysql/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
||||
|
||||
before_script:
|
||||
- mysql -e 'create database gotest;'
|
52
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
Normal file
52
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
# This is the official list of Go-MySQL-Driver authors for copyright purposes.
|
||||
|
||||
# If you are submitting a patch, please add your name or the name of the
|
||||
# organization which holds the copyright to this list in alphabetical order.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name <email address>
|
||||
# The email address is not required for organizations.
|
||||
# Please keep the list sorted.
|
||||
|
||||
|
||||
# Individual Persons
|
||||
|
||||
Aaron Hopkins <go-sql-driver at die.net>
|
||||
Arne Hormann <arnehormann at gmail.com>
|
||||
Carlos Nieto <jose.carlos at menteslibres.net>
|
||||
Chris Moos <chris at tech9computers.com>
|
||||
Daniel Nichter <nil at codenode.com>
|
||||
Daniël van Eeden <git at myname.nl>
|
||||
DisposaBoy <disposaboy at dby.me>
|
||||
Frederick Mayle <frederickmayle at gmail.com>
|
||||
Gustavo Kristic <gkristic at gmail.com>
|
||||
Hanno Braun <mail at hannobraun.com>
|
||||
Henri Yandell <flamefew at gmail.com>
|
||||
Hirotaka Yamamoto <ymmt2005 at gmail.com>
|
||||
INADA Naoki <songofacandy at gmail.com>
|
||||
James Harr <james.harr at gmail.com>
|
||||
Jian Zhen <zhenjl at gmail.com>
|
||||
Joshua Prunier <joshua.prunier at gmail.com>
|
||||
Julien Lefevre <julien.lefevr at gmail.com>
|
||||
Julien Schmidt <go-sql-driver at julienschmidt.com>
|
||||
Kamil Dziedzic <kamil at klecza.pl>
|
||||
Kevin Malachowski <kevin at chowski.com>
|
||||
Leonardo YongUk Kim <dalinaum at gmail.com>
|
||||
Luca Looz <luca.looz92 at gmail.com>
|
||||
Lucas Liu <extrafliu at gmail.com>
|
||||
Luke Scott <luke at webconnex.com>
|
||||
Michael Woolnough <michael.woolnough at gmail.com>
|
||||
Nicola Peduzzi <thenikso at gmail.com>
|
||||
Paul Bonser <misterpib at gmail.com>
|
||||
Runrioter Wung <runrioter at gmail.com>
|
||||
Soroush Pour <me at soroushjp.com>
|
||||
Stan Putrya <root.vagner at gmail.com>
|
||||
Stanley Gunawan <gunawan.stanley at gmail.com>
|
||||
Xiaobing Jiang <s7v7nislands at gmail.com>
|
||||
Xiuming Chen <cc at cxm.cc>
|
||||
|
||||
# Organizations
|
||||
|
||||
Barracuda Networks, Inc.
|
||||
Google Inc.
|
||||
Stripe Inc.
|
103 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md generated vendored Normal file
@ -0,0 +1,103 @@
## HEAD

Changes:

- Go 1.1 is no longer supported
- Use decimals field from MySQL to format time types (#249)
- Buffer optimizations (#269)
- TLS ServerName defaults to the host (#283)

Bugfixes:

- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Fixed handling of queries without columns and rows (#255)
- Fixed a panic when SetKeepAlive() failed (#298)
- Support receiving ERR packet while reading rows (#321)
- Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- Actually zero out bytes in handshake response (#378)
- Fixed race condition in registering LOAD DATA INFILE handler (#383)
- Fixed tests with MySQL 5.7.9+ (#380)
- QueryUnescape TLS config names (#397)
- Fixed "broken pipe" error caused by writing to a closed socket (#390)

New Features:
- Support for returning table alias on Columns() (#289, #359, #382)
- Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)


## Version 1.2 (2014-06-03)

Changes:

- We switched back to a "rolling release". `go get` installs the current master branch again
- Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
- Exported errors to allow easy checking from application code
- Enabled TCP Keepalives on TCP connections
- Optimized INFILE handling (better buffer size calculation, lazy init, ...)
- The DSN parser also checks for a missing separating slash
- Faster binary date / datetime to string formatting
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- readPacket() uses an iterative approach instead of the recursive approach to merge split packets

New Features:

- `RegisterDial` allows the usage of a custom dial function to establish the network connection
- Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
- Logging of critical errors is configurable with `SetLogger`
- Google CloudSQL support

Bugfixes:

- Allow more than 32 parameters in prepared statements
- Various old_password fixes
- Fixed TestConcurrent test to pass Go's race detection
- Fixed appendLengthEncodedInteger for large numbers
- Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)


## Version 1.1 (2013-11-02)

Changes:

- Go-MySQL-Driver now requires Go 1.1
- Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
- Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
- `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
- DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
- Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
- Optimized the buffer for reading
- stmt.Query now caches column metadata
- New Logo
- Changed the copyright header to include all contributors
- Improved the LOAD INFILE documentation
- The driver struct is now exported to make the driver directly accessible
- Refactored the driver tests
- Added more benchmarks and moved all to a separate file
- Other small refactoring

New Features:

- Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
- Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
- Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used

Bugfixes:

- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- Split packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
- Fixed sign byte for positive TIME fields


## Version 1.0 (2013-05-14)

Initial Release
23 vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md generated vendored Normal file
@ -0,0 +1,23 @@
# Contributing Guidelines

## Reporting Issues

Before creating a new Issue, please first check whether a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).

## Contributing Code

By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
Don't forget to add yourself to the AUTHORS file.

### Code Review

Everyone is invited to review and comment on pull requests.
If it looks fine to you, comment with "LGTM" (Looks good to me).

If changes are required, notify the reviewers with "PTAL" (Please take another look) after committing the fixes.

Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".

## Development Ideas

If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
21 vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md generated vendored Normal file
@ -0,0 +1,21 @@
### Issue description
Tell us what should happen and what happens instead

### Example code
```go
If possible, please enter some example code here to reproduce the issue.
```

### Error log
```
If you have an error log, please paste it here.
```

### Configuration
*Driver version (or git SHA):*

*Go version:* run `go version` in your console

*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20

*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
373 vendor/github.com/go-sql-driver/mysql/LICENSE generated vendored Normal file
@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
|
||||
==================================
|
||||
|
||||
1. Definitions
|
||||
--------------
|
||||
|
||||
1.1. "Contributor"
|
||||
means each individual or legal entity that creates, contributes to
|
||||
the creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
means the combination of the Contributions of others (if any) used
|
||||
by a Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
means Source Code Form to which the initial Contributor has attached
|
||||
the notice in Exhibit A, the Executable Form of such Source Code
|
||||
Form, and Modifications of such Source Code Form, in each case
|
||||
including portions thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
(a) that the initial Contributor has attached the notice described
|
||||
in Exhibit B to the Covered Software; or
|
||||
|
||||
(b) that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the
|
||||
terms of a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
means a work that combines Covered Software with other material, in
|
||||
a separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
means having the right to grant, to the maximum extent possible,
|
||||
whether at the time of the initial grant or subsequently, any and
|
||||
all of the rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
means any of the following:
|
||||
|
||||
(a) any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered
|
||||
Software; or
|
||||
|
||||
(b) any new file in Source Code Form that contains any Covered
|
||||
Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the
|
||||
License, by the making, using, selling, offering for sale, having
|
||||
made, import, or transfer of either its Contributions or its
|
||||
Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
means either the GNU General Public License, Version 2.0, the GNU
|
||||
Lesser General Public License, Version 2.1, the GNU Affero General
|
||||
Public License, Version 3.0, or any later versions of those
|
||||
licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that
|
||||
controls, is controlled by, or is under common control with You. For
|
||||
purposes of this definition, "control" means (a) the power, direct
|
||||
or indirect, to cause the direction or management of such entity,
|
||||
whether by contract or otherwise, or (b) ownership of more than
|
||||
fifty percent (50%) of the outstanding shares or beneficial
|
||||
ownership of such entity.
|
||||
|
||||
2. License Grants and Conditions
|
||||
--------------------------------
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
(a) under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
||||
for sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
(a) for any code that a Contributor has removed from Covered Software;
|
||||
or
|
||||
|
||||
(b) for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
(c) under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights
|
||||
to grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
||||
in Section 2.1.
|
||||
|
||||
3. Responsibilities
|
||||
-------------------
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
(a) such Covered Software must also be made available in Source Code
|
||||
Form, as described in Section 3.1, and You must inform recipients of
|
||||
the Executable Form how they can obtain a copy of such Source Code
|
||||
Form by reasonable means in a timely manner, at a charge no more
|
||||
than the cost of distribution to the recipient; and
|
||||
|
||||
(b) You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter
|
||||
the recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty,
|
||||
or limitations of liability) contained within the Source Code Form of
|
||||
the Covered Software, except that You may alter any license notices to
|
||||
the extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
---------------------------------------------------
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this
|
||||
License with respect to some or all of the Covered Software due to
|
||||
statute, judicial order, or regulation then You must: (a) comply with
|
||||
the terms of this License to the maximum extent possible; and (b)
|
||||
describe the limitations and the code they affect. Such description must
|
||||
be placed in a text file included with all distributions of the Covered
|
||||
Software under this License. Except to the extent prohibited by statute
|
||||
or regulation, such description must be sufficiently detailed for a
|
||||
recipient of ordinary skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
--------------
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically
|
||||
if You fail to comply with any of its terms. However, if You become
|
||||
compliant, then the rights granted under this License from a particular
|
||||
Contributor are reinstated (a) provisionally, unless and until such
|
||||
Contributor explicitly and finally terminates Your grants, and (b) on an
|
||||
ongoing basis, if such Contributor fails to notify You of the
|
||||
non-compliance by some reasonable means prior to 60 days after You have
|
||||
come back into compliance. Moreover, Your grants from a particular
|
||||
Contributor are reinstated on an ongoing basis if such Contributor
|
||||
notifies You of the non-compliance by some reasonable means, this is the
|
||||
first time You have received notice of non-compliance with this License
|
||||
from such Contributor, and You become compliant prior to 30 days after
|
||||
Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
|
||||
end user license agreements (excluding distributors and resellers) which
|
||||
have been validly granted by You or Your distributors under this License
|
||||
prior to termination shall survive termination.
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 6. Disclaimer of Warranty *
|
||||
* ------------------------- *
|
||||
* *
|
||||
* Covered Software is provided under this License on an "as is" *
|
||||
* basis, without warranty of any kind, either expressed, implied, or *
|
||||
* statutory, including, without limitation, warranties that the *
|
||||
* Covered Software is free of defects, merchantable, fit for a *
|
||||
* particular purpose or non-infringing. The entire risk as to the *
|
||||
* quality and performance of the Covered Software is with You. *
|
||||
* Should any Covered Software prove defective in any respect, You *
|
||||
* (not any Contributor) assume the cost of any necessary servicing, *
|
||||
* repair, or correction. This disclaimer of warranty constitutes an *
|
||||
* essential part of this License. No use of any Covered Software is *
|
||||
* authorized under this License except under this disclaimer. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 7. Limitation of Liability *
|
||||
* -------------------------- *
|
||||
* *
|
||||
* Under no circumstances and under no legal theory, whether tort *
|
||||
* (including negligence), contract, or otherwise, shall any *
|
||||
* Contributor, or anyone who distributes Covered Software as *
|
||||
* permitted above, be liable to You for any direct, indirect, *
|
||||
* special, incidental, or consequential damages of any character *
|
||||
* including, without limitation, damages for lost profits, loss of *
|
||||
* goodwill, work stoppage, computer failure or malfunction, or any *
|
||||
* and all other commercial damages or losses, even if such party *
|
||||
* shall have been informed of the possibility of such damages. This *
|
||||
* limitation of liability shall not apply to liability for death or *
|
||||
* personal injury resulting from such party's negligence to the *
|
||||
* extent applicable law prohibits such limitation. Some *
|
||||
* jurisdictions do not allow the exclusion or limitation of *
|
||||
* incidental or consequential damages, so this exclusion and *
|
||||
* limitation may not apply to You. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
8. Litigation
|
||||
-------------
|
||||
|
||||
Any litigation relating to this License may be brought only in the
|
||||
courts of a jurisdiction where the defendant maintains its principal
|
||||
place of business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions.
|
||||
Nothing in this Section shall prevent a party's ability to bring
|
||||
cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
----------------
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides
|
||||
that the language of a contract shall be construed against the drafter
|
||||
shall not be used to construe this License against a Contributor.
|
||||
|
||||
10. Versions of the License
|
||||
---------------------------
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses
|
||||
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
-------------------------------------------
|
||||
|
||||
This Source Code Form is subject to the terms of the Mozilla Public
|
||||
License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular
|
||||
file, then You may include the notice in a location (such as a LICENSE
|
||||
file in a relevant directory) where a recipient would be likely to look
|
||||
for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
---------------------------------------------------------
|
||||
|
||||
This Source Code Form is "Incompatible With Secondary Licenses", as
|
||||
defined by the Mozilla Public License, v. 2.0.
|
9 vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md generated vendored Normal file
@ -0,0 +1,9 @@
### Description
Please explain the changes you made here.

### Checklist
- [ ] Code compiles correctly
- [ ] Created tests which fail without the change (if possible)
- [ ] All tests passing
- [ ] Extended the README / documentation, if necessary
- [ ] Added myself / the copyright holder to the AUTHORS file
420 vendor/github.com/go-sql-driver/mysql/README.md generated vendored Normal file
@ -0,0 +1,420 @@
# Go-MySQL-Driver
|
||||
|
||||
A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
|
||||
|
||||
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
|
||||
|
||||
**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
|
||||
|
||||
[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
|
||||
|
||||
---------------------------------------
|
||||
* [Features](#features)
|
||||
* [Requirements](#requirements)
|
||||
* [Installation](#installation)
|
||||
* [Usage](#usage)
|
||||
* [DSN (Data Source Name)](#dsn-data-source-name)
|
||||
* [Password](#password)
|
||||
* [Protocol](#protocol)
|
||||
* [Address](#address)
|
||||
* [Parameters](#parameters)
|
||||
* [Examples](#examples)
|
||||
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
|
||||
* [time.Time support](#timetime-support)
|
||||
* [Unicode support](#unicode-support)
|
||||
* [Testing / Development](#testing--development)
|
||||
* [License](#license)
|
||||
|
||||
---------------------------------------
|
||||
|
||||
## Features
|
||||
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
|
||||
* Native Go implementation. No C-bindings, just pure Go
|
||||
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
|
||||
* Automatic handling of broken connections
|
||||
* Automatic Connection Pooling *(by database/sql package)*
|
||||
* Supports queries larger than 16MB
|
||||
* Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
|
||||
* Intelligent `LONG DATA` handling in prepared statements
|
||||
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
|
||||
* Optional `time.Time` parsing
|
||||
* Optional placeholder interpolation
|
||||
|
||||
## Requirements
|
||||
* Go 1.2 or higher
|
||||
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
|
||||
|
||||
---------------------------------------
|
||||
|
||||
## Installation
|
||||
Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from the shell:
|
||||
```bash
|
||||
$ go get github.com/go-sql-driver/mysql
|
||||
```
|
||||
Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
|
||||
|
||||
## Usage
|
||||
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](http://golang.org/pkg/database/sql) API.
|
||||
|
||||
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
|
||||
```go
|
||||
import "database/sql"
|
||||
import _ "github.com/go-sql-driver/mysql"
|
||||
|
||||
db, err := sql.Open("mysql", "user:password@/dbname")
|
||||
```
|
||||
|
||||
[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
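As a slightly fuller, hedged sketch of the same pattern (the DSN, the query, and the error handling below are illustrative placeholders, not taken from the upstream docs):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// The DSN (user, password, host, dbname) is a placeholder.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Open does not open a connection; Ping verifies the DSN actually works.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}

	var version string
	if err := db.QueryRow("SELECT VERSION()").Scan(&version); err != nil {
		log.Fatal(err)
	}
	log.Println("connected to MySQL", version)
}
```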
|
||||
|
||||
|
||||
### DSN (Data Source Name)
|
||||
|
||||
The Data Source Name has a common format, similar to the one used by [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts are marked by square brackets):
|
||||
```
|
||||
[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
|
||||
```
|
||||
|
||||
A DSN in its fullest form:
|
||||
```
|
||||
username:password@protocol(address)/dbname?param=value
|
||||
```
|
||||
|
||||
Except for the database name, all values are optional. So the minimal DSN is:
|
||||
```
|
||||
/dbname
|
||||
```
|
||||
|
||||
If you do not want to preselect a database, leave `dbname` empty:
|
||||
```
|
||||
/
|
||||
```
|
||||
This has the same effect as an empty DSN string:
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
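A minimal sketch of that approach, assuming the `Config` fields shown here exist in this vendored version (they may differ; check the godoc linked above):

```go
package example

import (
	"time"

	"github.com/go-sql-driver/mysql"
)

// buildDSN is a hypothetical helper; the Config fields used here
// (User, Passwd, Net, Addr, DBName, ParseTime, Loc) are assumed to exist
// in this vendored version of the driver.
func buildDSN() string {
	cfg := &mysql.Config{
		User:      "user",
		Passwd:    "password",
		Net:       "tcp",
		Addr:      "127.0.0.1:3306",
		DBName:    "dbname",
		ParseTime: true,
		Loc:       time.Local,
	}
	return cfg.FormatDSN()
}
```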
|
||||
|
||||
#### Password
|
||||
Passwords can consist of any character. Escaping is **not** necessary.
|
||||
|
||||
#### Protocol
|
||||
See [net.Dial](http://golang.org/pkg/net/#Dial) for more information about which networks are available.
In general, you should use a Unix domain socket if available and TCP otherwise for best performance.
|
||||
|
||||
#### Address
|
||||
For TCP and UDP networks, addresses have the form `host:port`.
|
||||
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
|
||||
The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
|
||||
|
||||
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
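As a hedged illustration of the IPv6 note above, a hypothetical helper that builds the address part with `net.JoinHostPort` (the helper name and DSN layout are assumptions for illustration):

```go
package example

import (
	"fmt"
	"net"
)

// buildTCPDSN is a hypothetical helper. net.JoinHostPort adds the square
// brackets required for literal IPv6 addresses automatically, e.g.
// ("de:ad:be:ef::ca:fe", "3306") -> "[de:ad:be:ef::ca:fe]:3306".
func buildTCPDSN(user, pass, host, port, dbname string) string {
	return fmt.Sprintf("%s:%s@tcp(%s)/%s", user, pass, net.JoinHostPort(host, port), dbname)
}
```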
|
||||
|
||||
#### Parameters
|
||||
*Parameters are case-sensitive!*
|
||||
|
||||
Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
|
||||
|
||||
##### `allowAllFiles`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
|
||||
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
|
||||
|
||||
##### `allowCleartextPasswords`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
|
||||
|
||||
##### `allowOldPasswords`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
|
||||
|
||||
##### `charset`
|
||||
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <name>
|
||||
Default: none
|
||||
```
|
||||
|
||||
Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
|
||||
|
||||
Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
|
||||
Unless you need the fallback behavior, please use `collation` instead.
|
||||
|
||||
##### `collation`
|
||||
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <name>
|
||||
Default: utf8_general_ci
|
||||
```
|
||||
|
||||
Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
|
||||
|
||||
A list of valid collations for a server is retrievable with `SHOW COLLATION`.
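As a hedged illustration, a DSN pinning the connection collation (credentials, host, and the collation choice are placeholders):

```go
// Placeholder DSN: pins the connection collation without issuing extra queries.
const dsn = "user:password@tcp(127.0.0.1:3306)/dbname?collation=utf8mb4_unicode_ci"
```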
|
||||
|
||||
##### `clientFoundRows`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
|
||||
|
||||
##### `columnsWithAlias`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
|
||||
|
||||
```
|
||||
SELECT u.id FROM users as u
|
||||
```
|
||||
|
||||
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
|
||||
|
||||
##### `interpolateParams`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips: with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again, each of which requires a roundtrip.
|
||||
|
||||
*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
|
||||
|
||||
##### `loc`
|
||||
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <escaped name>
|
||||
Default: UTC
|
||||
```
|
||||
|
||||
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
|
||||
|
||||
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
|
||||
|
||||
Please keep in mind that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively, you can manually replace the `/` with `%2F`; for example, `US/Pacific` becomes `loc=US%2FPacific`.
|
||||
|
||||
##### `multiStatements`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
Allows multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injection. Only the result of the first query is returned; all other results are silently discarded.
|
||||
|
||||
When `multiStatements` is used, `?` parameters must only be used in the first statement.
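A hedged sketch of a batched call under `multiStatements=true`; the table names and statements are hypothetical:

```go
package example

import (
	"database/sql"
	"log"
)

// batchUpdate assumes db was opened with a DSN containing multiStatements=true.
// The table names t1 and t2 are hypothetical. Both statements travel in one
// roundtrip; only the result of the first is returned.
func batchUpdate(db *sql.DB) {
	_, err := db.Exec("UPDATE t1 SET done = 1 WHERE id = 7; DELETE FROM t2 WHERE stale = 1")
	if err != nil {
		log.Fatal(err)
	}
}
```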
|
||||
|
||||
|
||||
##### `parseTime`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
|
||||
|
||||
|
||||
##### `readTimeout`
|
||||
|
||||
```
|
||||
Type: decimal number
|
||||
Default: 0
|
||||
```
|
||||
|
||||
I/O read timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
|
||||
|
||||
|
||||
##### `strict`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`strict=true` enables strict mode, in which MySQL warnings are treated as errors.

By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for a DSN example.
|
||||
|
||||
|
||||
##### `timeout`
|
||||
|
||||
```
|
||||
Type: decimal number
|
||||
Default: OS default
|
||||
```
|
||||
|
||||
*Driver*-side connection timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
|
||||
|
||||
|
||||
##### `tls`
|
||||
|
||||
```
|
||||
Type: bool / string
|
||||
Valid Values: true, false, skip-verify, <name>
|
||||
Default: false
|
||||
```
|
||||
|
||||
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
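A hedged sketch of registering a custom TLS config and referencing it from the DSN; the certificate path, config name, host, and credentials are placeholders:

```go
package example

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

// openWithCustomTLS registers a CA bundle under the name "custom" and then
// references it via tls=custom in the DSN.
func openWithCustomTLS() *sql.DB {
	rootCertPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile("/path/to/ca-cert.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
		log.Fatal("failed to append CA certificate")
	}
	mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool})

	db, err := sql.Open("mysql", "user:password@tcp(db.example.com:3306)/dbname?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	return db
}
```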
|
||||
|
||||
|
||||
##### `writeTimeout`
|
||||
|
||||
```
|
||||
Type: decimal number
|
||||
Default: 0
|
||||
```
|
||||
|
||||
I/O write timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
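As a hedged illustration combining the three duration parameters above (host and credentials are placeholders):

```go
// Placeholder DSN: 5s to establish the connection, 30s per read, 30s per write.
const dsn = "user:password@tcp(db.example.com:3306)/dbname?timeout=5s&readTimeout=30s&writeTimeout=30s"
```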
|
||||
|
||||
|
||||
##### System Variables
|
||||
|
||||
All other parameters are interpreted as system variables:
|
||||
* `autocommit`: `"SET autocommit=<value>"`
|
||||
* [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"`
|
||||
* [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
|
||||
* `param`: `"SET <param>=<value>"`
|
||||
|
||||
*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
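A hedged sketch of escaping a system-variable value when building a DSN; the helper name and the `time_zone` example are assumptions for illustration:

```go
package example

import (
	"fmt"
	"net/url"
)

// dsnWithTimeZone is a hypothetical helper: MySQL expects the time_zone value
// quoted, and as noted above the whole value must be percent-escaped.
// dsnWithTimeZone("user:password@/dbname", "UTC")
// returns "user:password@/dbname?time_zone=%27UTC%27".
func dsnWithTimeZone(base, tz string) string {
	return fmt.Sprintf("%s?time_zone=%s", base, url.QueryEscape("'"+tz+"'"))
}
```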
|
||||
|
||||
#### Examples
|
||||
```
|
||||
user@unix(/path/to/socket)/dbname
|
||||
```
|
||||
|
||||
```
|
||||
root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
|
||||
```
|
||||
|
||||
```
|
||||
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
|
||||
```
|
||||
|
||||
Use the [strict mode](#strict) but ignore notes:
|
||||
```
|
||||
user:password@/dbname?strict=true&sql_notes=false
|
||||
```
|
||||
|
||||
TCP via IPv6:
|
||||
```
|
||||
user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
|
||||
```
|
||||
|
||||
TCP on a remote host, e.g. Amazon RDS:
|
||||
```
|
||||
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
|
||||
```
|
||||
|
||||
Google Cloud SQL on App Engine:
|
||||
```
|
||||
user@cloudsql(project-id:instance-name)/dbname
|
||||
```
|
||||
|
||||
TCP using default port (3306) on localhost:
|
||||
```
|
||||
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
|
||||
```
|
||||
|
||||
Use the default protocol (tcp) and host (localhost:3306):
|
||||
```
|
||||
user:password@/dbname
|
||||
```
|
||||
|
||||
No Database preselected:
|
||||
```
|
||||
user:password@/
|
||||
```
|
||||
|
||||
### `LOAD DATA LOCAL INFILE` support
|
||||
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
|
||||
```go
|
||||
import "github.com/go-sql-driver/mysql"
|
||||
```
|
||||
|
||||
Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
|
||||
|
||||
To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available under the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need it anymore.
|
||||
|
||||
See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
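A hedged sketch of the whitelist-based variant; the file path, table, and CSV layout are hypothetical:

```go
package example

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

// loadCSV whitelists a (hypothetical) file before running LOAD DATA LOCAL INFILE;
// without the registration the driver rejects the file.
func loadCSV(db *sql.DB) {
	mysql.RegisterLocalFile("/tmp/users.csv")
	defer mysql.DeregisterLocalFile("/tmp/users.csv")

	_, err := db.Exec(`LOAD DATA LOCAL INFILE '/tmp/users.csv' INTO TABLE users
		FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n'`)
	if err != nil {
		log.Fatal(err)
	}
}
```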
|
||||
|
||||
|
||||
### `time.Time` support
|
||||
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
|
||||
|
||||
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go of `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
|
||||
|
||||
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
|
||||
|
||||
Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
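A hedged sketch tying `parseTime=true` and `NullTime` together; the table and column names are hypothetical:

```go
package example

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

// scanCreatedAt assumes the DSN contains parseTime=true and that a table
// "events" with a nullable DATETIME column "created_at" exists (both hypothetical).
func scanCreatedAt(db *sql.DB, id int) {
	var created mysql.NullTime
	if err := db.QueryRow("SELECT created_at FROM events WHERE id = ?", id).Scan(&created); err != nil {
		log.Fatal(err)
	}
	if created.Valid {
		log.Println("created at", created.Time)
	} else {
		log.Println("created_at is NULL")
	}
}
```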
|
||||
|
||||
|
||||
### Unicode support
|
||||
Since version 1.1, Go-MySQL-Driver uses the collation `utf8_general_ci` by default.
|
||||
|
||||
Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
|
||||
|
||||
Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is no longer necessary. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.
|
||||
|
||||
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
|
||||
|
||||
|
||||
## Testing / Development
|
||||
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
|
||||
|
||||
Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
|
||||
If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
|
||||
|
||||
See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
|
||||
|
||||
---------------------------------------
|
||||
|
||||
## License
|
||||
Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
|
||||
|
||||
Mozilla summarizes the license scope as follows:
|
||||
> MPL: The copyleft applies to any files containing MPLed code.
|
||||
|
||||
|
||||
That means:
|
||||
* You can **use** the **unchanged** source code both in private and commercially
|
||||
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
|
||||
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
|
||||
|
||||
Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
|
||||
|
||||
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
|
||||
|
||||
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
|
||||
|
19 vendor/github.com/go-sql-driver/mysql/appengine.go generated vendored Normal file
@ -0,0 +1,19 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build appengine

package mysql

import (
	"appengine/cloudsql"
)

func init() {
	RegisterDial("cloudsql", cloudsql.Dial)
}
246 vendor/github.com/go-sql-driver/mysql/benchmark_test.go generated vendored Normal file
@ -0,0 +1,246 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TB testing.B
|
||||
|
||||
func (tb *TB) check(err error) {
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
|
||||
tb.check(err)
|
||||
return db
|
||||
}
|
||||
|
||||
func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
|
||||
tb.check(err)
|
||||
return rows
|
||||
}
|
||||
|
||||
func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
|
||||
tb.check(err)
|
||||
return stmt
|
||||
}
|
||||
|
||||
func initDB(b *testing.B, queries ...string) *sql.DB {
|
||||
tb := (*TB)(b)
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
for _, query := range queries {
|
||||
if _, err := db.Exec(query); err != nil {
|
||||
if w, ok := err.(MySQLWarnings); ok {
|
||||
b.Logf("warning on %q: %v", query, w)
|
||||
} else {
|
||||
b.Fatalf("error on %q: %v", query, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
const concurrencyLevel = 10
|
||||
|
||||
func BenchmarkQuery(b *testing.B) {
|
||||
tb := (*TB)(b)
|
||||
b.StopTimer()
|
||||
b.ReportAllocs()
|
||||
db := initDB(b,
|
||||
"DROP TABLE IF EXISTS foo",
|
||||
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
|
||||
`INSERT INTO foo VALUES (1, "one")`,
|
||||
`INSERT INTO foo VALUES (2, "two")`,
|
||||
)
|
||||
db.SetMaxIdleConns(concurrencyLevel)
|
||||
defer db.Close()
|
||||
|
||||
stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
|
||||
defer stmt.Close()
|
||||
|
||||
remain := int64(b.N)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(concurrencyLevel)
|
||||
defer wg.Wait()
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < concurrencyLevel; i++ {
|
||||
go func() {
|
||||
for {
|
||||
if atomic.AddInt64(&remain, -1) < 0 {
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
|
||||
var got string
|
||||
tb.check(stmt.QueryRow(1).Scan(&got))
|
||||
if got != "one" {
|
||||
b.Errorf("query = %q; want one", got)
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkExec(b *testing.B) {
|
||||
tb := (*TB)(b)
|
||||
b.StopTimer()
|
||||
b.ReportAllocs()
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
db.SetMaxIdleConns(concurrencyLevel)
|
||||
defer db.Close()
|
||||
|
||||
stmt := tb.checkStmt(db.Prepare("DO 1"))
|
||||
defer stmt.Close()
|
||||
|
||||
remain := int64(b.N)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(concurrencyLevel)
|
||||
defer wg.Wait()
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < concurrencyLevel; i++ {
|
||||
go func() {
|
||||
for {
|
||||
if atomic.AddInt64(&remain, -1) < 0 {
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := stmt.Exec(); err != nil {
|
||||
b.Fatal(err.Error())
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// data, but no db writes
|
||||
var roundtripSample []byte
|
||||
|
||||
func initRoundtripBenchmarks() ([]byte, int, int) {
|
||||
if roundtripSample == nil {
|
||||
roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
|
||||
}
|
||||
return roundtripSample, 16, len(roundtripSample)
|
||||
}
|
||||
|
||||
func BenchmarkRoundtripTxt(b *testing.B) {
|
||||
b.StopTimer()
|
||||
sample, min, max := initRoundtripBenchmarks()
|
||||
sampleString := string(sample)
|
||||
b.ReportAllocs()
|
||||
tb := (*TB)(b)
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
defer db.Close()
|
||||
b.StartTimer()
|
||||
var result string
|
||||
for i := 0; i < b.N; i++ {
|
||||
length := min + i
|
||||
if length > max {
|
||||
length = max
|
||||
}
|
||||
test := sampleString[0:length]
|
||||
rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
|
||||
if !rows.Next() {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
err := rows.Scan(&result)
|
||||
if err != nil {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
if result != test {
|
||||
rows.Close()
|
||||
b.Errorf("mismatch")
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRoundtripBin(b *testing.B) {
|
||||
b.StopTimer()
|
||||
sample, min, max := initRoundtripBenchmarks()
|
||||
b.ReportAllocs()
|
||||
tb := (*TB)(b)
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
defer db.Close()
|
||||
stmt := tb.checkStmt(db.Prepare("SELECT ?"))
|
||||
defer stmt.Close()
|
||||
b.StartTimer()
|
||||
var result sql.RawBytes
|
||||
for i := 0; i < b.N; i++ {
|
||||
length := min + i
|
||||
if length > max {
|
||||
length = max
|
||||
}
|
||||
test := sample[0:length]
|
||||
rows := tb.checkRows(stmt.Query(test))
|
||||
if !rows.Next() {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
err := rows.Scan(&result)
|
||||
if err != nil {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
if !bytes.Equal(result, test) {
|
||||
rows.Close()
|
||||
b.Errorf("mismatch")
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkInterpolation(b *testing.B) {
|
||||
mc := &mysqlConn{
|
||||
cfg: &Config{
|
||||
InterpolateParams: true,
|
||||
Loc: time.UTC,
|
||||
},
|
||||
maxPacketAllowed: maxPacketSize,
|
||||
maxWriteSize: maxPacketSize - 1,
|
||||
buf: newBuffer(nil),
|
||||
}
|
||||
|
||||
args := []driver.Value{
|
||||
int64(42424242),
|
||||
float64(math.Pi),
|
||||
false,
|
||||
time.Unix(1423411542, 807015000),
|
||||
[]byte("bytes containing special chars ' \" \a \x00"),
|
||||
"string containing special chars ' \" \a \x00",
|
||||
}
|
||||
q := "SELECT ?, ?, ?, ?, ?, ?"
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := mc.interpolateParams(q, args)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
147 vendor/github.com/go-sql-driver/mysql/buffer.go generated vendored Normal file
@ -0,0 +1,147 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
const defaultBufSize = 4096
|
||||
|
||||
// A buffer which is used for both reading and writing.
|
||||
// This is possible since communication on each connection is synchronous.
|
||||
// In other words, we can't write and read simultaneously on the same connection.
|
||||
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
|
||||
// Also highly optimized for this particular use case.
|
||||
type buffer struct {
|
||||
buf []byte
|
||||
nc net.Conn
|
||||
idx int
|
||||
length int
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func newBuffer(nc net.Conn) buffer {
|
||||
var b [defaultBufSize]byte
|
||||
return buffer{
|
||||
buf: b[:],
|
||||
nc: nc,
|
||||
}
|
||||
}
|
||||
|
||||
// fill reads into the buffer until at least _need_ bytes are in it
|
||||
func (b *buffer) fill(need int) error {
|
||||
n := b.length
|
||||
|
||||
// move existing data to the beginning
|
||||
if n > 0 && b.idx > 0 {
|
||||
copy(b.buf[0:n], b.buf[b.idx:])
|
||||
}
|
||||
|
||||
// grow buffer if necessary
|
||||
// TODO: let the buffer shrink again at some point
|
||||
// Maybe keep the org buf slice and swap back?
|
||||
if need > len(b.buf) {
|
||||
// Round up to the next multiple of the default size
|
||||
newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
|
||||
copy(newBuf, b.buf)
|
||||
b.buf = newBuf
|
||||
}
|
||||
|
||||
b.idx = 0
|
||||
|
||||
for {
|
||||
if b.timeout > 0 {
|
||||
if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
nn, err := b.nc.Read(b.buf[n:])
|
||||
n += nn
|
||||
|
||||
switch err {
|
||||
case nil:
|
||||
if n < need {
|
||||
continue
|
||||
}
|
||||
b.length = n
|
||||
return nil
|
||||
|
||||
case io.EOF:
|
||||
if n >= need {
|
||||
b.length = n
|
||||
return nil
|
||||
}
|
||||
return io.ErrUnexpectedEOF
|
||||
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// returns next N bytes from buffer.
|
||||
// The returned slice is only guaranteed to be valid until the next read
|
||||
func (b *buffer) readNext(need int) ([]byte, error) {
|
||||
if b.length < need {
|
||||
// refill
|
||||
if err := b.fill(need); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
offset := b.idx
|
||||
b.idx += need
|
||||
b.length -= need
|
||||
return b.buf[offset:b.idx], nil
|
||||
}
|
||||
|
||||
// returns a buffer with the requested size.
|
||||
// If possible, a slice from the existing buffer is returned.
|
||||
// Otherwise a bigger buffer is made.
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeBuffer(length int) []byte {
|
||||
if b.length > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// test (cheap) general case first
|
||||
if length <= defaultBufSize || length <= cap(b.buf) {
|
||||
return b.buf[:length]
|
||||
}
|
||||
|
||||
if length < maxPacketSize {
|
||||
b.buf = make([]byte, length)
|
||||
return b.buf
|
||||
}
|
||||
return make([]byte, length)
|
||||
}
|
||||
|
||||
// shortcut which can be used if the requested buffer is guaranteed to be
|
||||
// smaller than defaultBufSize
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeSmallBuffer(length int) []byte {
|
||||
if b.length == 0 {
|
||||
return b.buf[:length]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// takeCompleteBuffer returns the complete existing buffer.
|
||||
// This can be used if the necessary buffer size is unknown.
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeCompleteBuffer() []byte {
|
||||
if b.length == 0 {
|
||||
return b.buf
|
||||
}
|
||||
return nil
|
||||
}
|
250 vendor/github.com/go-sql-driver/mysql/collations.go generated vendored Normal file
@ -0,0 +1,250 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
const defaultCollation = "utf8_general_ci"
|
||||
|
||||
// A list of available collations mapped to the internal ID.
|
||||
// To update this map use the following MySQL query:
|
||||
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
|
||||
var collations = map[string]byte{
|
||||
"big5_chinese_ci": 1,
|
||||
"latin2_czech_cs": 2,
|
||||
"dec8_swedish_ci": 3,
|
||||
"cp850_general_ci": 4,
|
||||
"latin1_german1_ci": 5,
|
||||
"hp8_english_ci": 6,
|
||||
"koi8r_general_ci": 7,
|
||||
"latin1_swedish_ci": 8,
|
||||
"latin2_general_ci": 9,
|
||||
"swe7_swedish_ci": 10,
|
||||
"ascii_general_ci": 11,
|
||||
"ujis_japanese_ci": 12,
|
||||
"sjis_japanese_ci": 13,
|
||||
"cp1251_bulgarian_ci": 14,
|
||||
"latin1_danish_ci": 15,
|
||||
"hebrew_general_ci": 16,
|
||||
"tis620_thai_ci": 18,
|
||||
"euckr_korean_ci": 19,
|
||||
"latin7_estonian_cs": 20,
|
||||
"latin2_hungarian_ci": 21,
|
||||
"koi8u_general_ci": 22,
|
||||
"cp1251_ukrainian_ci": 23,
|
||||
"gb2312_chinese_ci": 24,
|
||||
"greek_general_ci": 25,
|
||||
"cp1250_general_ci": 26,
|
||||
"latin2_croatian_ci": 27,
|
||||
"gbk_chinese_ci": 28,
|
||||
"cp1257_lithuanian_ci": 29,
|
||||
"latin5_turkish_ci": 30,
|
||||
"latin1_german2_ci": 31,
|
||||
"armscii8_general_ci": 32,
|
||||
"utf8_general_ci": 33,
|
||||
"cp1250_czech_cs": 34,
|
||||
"ucs2_general_ci": 35,
|
||||
"cp866_general_ci": 36,
|
||||
"keybcs2_general_ci": 37,
|
||||
"macce_general_ci": 38,
|
||||
"macroman_general_ci": 39,
|
||||
"cp852_general_ci": 40,
|
||||
"latin7_general_ci": 41,
|
||||
"latin7_general_cs": 42,
|
||||
"macce_bin": 43,
|
||||
"cp1250_croatian_ci": 44,
|
||||
"utf8mb4_general_ci": 45,
|
||||
"utf8mb4_bin": 46,
|
||||
"latin1_bin": 47,
|
||||
"latin1_general_ci": 48,
|
||||
"latin1_general_cs": 49,
|
||||
"cp1251_bin": 50,
|
||||
"cp1251_general_ci": 51,
|
||||
"cp1251_general_cs": 52,
|
||||
"macroman_bin": 53,
|
||||
"utf16_general_ci": 54,
|
||||
"utf16_bin": 55,
|
||||
"utf16le_general_ci": 56,
|
||||
"cp1256_general_ci": 57,
|
||||
"cp1257_bin": 58,
|
||||
"cp1257_general_ci": 59,
|
||||
"utf32_general_ci": 60,
|
||||
"utf32_bin": 61,
|
||||
"utf16le_bin": 62,
|
||||
"binary": 63,
|
||||
"armscii8_bin": 64,
|
||||
"ascii_bin": 65,
|
||||
"cp1250_bin": 66,
|
||||
"cp1256_bin": 67,
|
||||
"cp866_bin": 68,
|
||||
"dec8_bin": 69,
|
||||
"greek_bin": 70,
|
||||
"hebrew_bin": 71,
|
||||
"hp8_bin": 72,
|
||||
"keybcs2_bin": 73,
|
||||
"koi8r_bin": 74,
|
||||
"koi8u_bin": 75,
|
||||
"latin2_bin": 77,
|
||||
"latin5_bin": 78,
|
||||
"latin7_bin": 79,
|
||||
"cp850_bin": 80,
|
||||
"cp852_bin": 81,
|
||||
"swe7_bin": 82,
|
||||
"utf8_bin": 83,
|
||||
"big5_bin": 84,
|
||||
"euckr_bin": 85,
|
||||
"gb2312_bin": 86,
|
||||
"gbk_bin": 87,
|
||||
"sjis_bin": 88,
|
||||
"tis620_bin": 89,
|
||||
"ucs2_bin": 90,
|
||||
"ujis_bin": 91,
|
||||
"geostd8_general_ci": 92,
|
||||
"geostd8_bin": 93,
|
||||
"latin1_spanish_ci": 94,
|
||||
"cp932_japanese_ci": 95,
|
||||
"cp932_bin": 96,
|
||||
"eucjpms_japanese_ci": 97,
|
||||
"eucjpms_bin": 98,
|
||||
"cp1250_polish_ci": 99,
|
||||
"utf16_unicode_ci": 101,
|
||||
"utf16_icelandic_ci": 102,
|
||||
"utf16_latvian_ci": 103,
|
||||
"utf16_romanian_ci": 104,
|
||||
"utf16_slovenian_ci": 105,
|
||||
"utf16_polish_ci": 106,
|
||||
"utf16_estonian_ci": 107,
|
||||
"utf16_spanish_ci": 108,
|
||||
"utf16_swedish_ci": 109,
|
||||
"utf16_turkish_ci": 110,
|
||||
"utf16_czech_ci": 111,
|
||||
"utf16_danish_ci": 112,
|
||||
"utf16_lithuanian_ci": 113,
|
||||
"utf16_slovak_ci": 114,
|
||||
"utf16_spanish2_ci": 115,
|
||||
"utf16_roman_ci": 116,
|
||||
"utf16_persian_ci": 117,
|
||||
"utf16_esperanto_ci": 118,
|
||||
"utf16_hungarian_ci": 119,
|
||||
"utf16_sinhala_ci": 120,
|
||||
"utf16_german2_ci": 121,
|
||||
"utf16_croatian_ci": 122,
|
||||
"utf16_unicode_520_ci": 123,
|
||||
"utf16_vietnamese_ci": 124,
|
||||
"ucs2_unicode_ci": 128,
|
||||
"ucs2_icelandic_ci": 129,
|
||||
"ucs2_latvian_ci": 130,
|
||||
"ucs2_romanian_ci": 131,
|
||||
"ucs2_slovenian_ci": 132,
|
||||
"ucs2_polish_ci": 133,
|
||||
"ucs2_estonian_ci": 134,
|
||||
"ucs2_spanish_ci": 135,
|
||||
"ucs2_swedish_ci": 136,
|
||||
"ucs2_turkish_ci": 137,
|
||||
"ucs2_czech_ci": 138,
|
||||
"ucs2_danish_ci": 139,
|
||||
"ucs2_lithuanian_ci": 140,
|
||||
"ucs2_slovak_ci": 141,
|
||||
"ucs2_spanish2_ci": 142,
|
||||
"ucs2_roman_ci": 143,
|
||||
"ucs2_persian_ci": 144,
|
||||
"ucs2_esperanto_ci": 145,
|
||||
"ucs2_hungarian_ci": 146,
|
||||
"ucs2_sinhala_ci": 147,
|
||||
"ucs2_german2_ci": 148,
|
||||
"ucs2_croatian_ci": 149,
|
||||
"ucs2_unicode_520_ci": 150,
|
||||
"ucs2_vietnamese_ci": 151,
|
||||
"ucs2_general_mysql500_ci": 159,
|
||||
"utf32_unicode_ci": 160,
|
||||
"utf32_icelandic_ci": 161,
|
||||
"utf32_latvian_ci": 162,
|
||||
"utf32_romanian_ci": 163,
|
||||
"utf32_slovenian_ci": 164,
|
||||
"utf32_polish_ci": 165,
|
||||
"utf32_estonian_ci": 166,
|
||||
"utf32_spanish_ci": 167,
|
||||
"utf32_swedish_ci": 168,
|
||||
"utf32_turkish_ci": 169,
|
||||
"utf32_czech_ci": 170,
|
||||
"utf32_danish_ci": 171,
|
||||
"utf32_lithuanian_ci": 172,
|
||||
"utf32_slovak_ci": 173,
|
||||
"utf32_spanish2_ci": 174,
|
||||
"utf32_roman_ci": 175,
|
||||
"utf32_persian_ci": 176,
|
||||
"utf32_esperanto_ci": 177,
|
||||
"utf32_hungarian_ci": 178,
|
||||
"utf32_sinhala_ci": 179,
|
||||
"utf32_german2_ci": 180,
|
||||
"utf32_croatian_ci": 181,
|
||||
"utf32_unicode_520_ci": 182,
|
||||
"utf32_vietnamese_ci": 183,
|
||||
"utf8_unicode_ci": 192,
|
||||
"utf8_icelandic_ci": 193,
|
||||
"utf8_latvian_ci": 194,
|
||||
"utf8_romanian_ci": 195,
|
||||
"utf8_slovenian_ci": 196,
|
||||
"utf8_polish_ci": 197,
|
||||
"utf8_estonian_ci": 198,
|
||||
"utf8_spanish_ci": 199,
|
||||
"utf8_swedish_ci": 200,
|
||||
"utf8_turkish_ci": 201,
|
||||
"utf8_czech_ci": 202,
|
||||
"utf8_danish_ci": 203,
|
||||
"utf8_lithuanian_ci": 204,
|
||||
"utf8_slovak_ci": 205,
|
||||
"utf8_spanish2_ci": 206,
|
||||
"utf8_roman_ci": 207,
|
||||
"utf8_persian_ci": 208,
|
||||
"utf8_esperanto_ci": 209,
|
||||
"utf8_hungarian_ci": 210,
|
||||
"utf8_sinhala_ci": 211,
|
||||
"utf8_german2_ci": 212,
|
||||
"utf8_croatian_ci": 213,
|
||||
"utf8_unicode_520_ci": 214,
|
||||
"utf8_vietnamese_ci": 215,
|
||||
"utf8_general_mysql500_ci": 223,
|
||||
"utf8mb4_unicode_ci": 224,
|
||||
"utf8mb4_icelandic_ci": 225,
|
||||
"utf8mb4_latvian_ci": 226,
|
||||
"utf8mb4_romanian_ci": 227,
|
||||
"utf8mb4_slovenian_ci": 228,
|
||||
"utf8mb4_polish_ci": 229,
|
||||
"utf8mb4_estonian_ci": 230,
|
||||
"utf8mb4_spanish_ci": 231,
|
||||
"utf8mb4_swedish_ci": 232,
|
||||
"utf8mb4_turkish_ci": 233,
|
||||
"utf8mb4_czech_ci": 234,
|
||||
"utf8mb4_danish_ci": 235,
|
||||
"utf8mb4_lithuanian_ci": 236,
|
||||
"utf8mb4_slovak_ci": 237,
|
||||
"utf8mb4_spanish2_ci": 238,
|
||||
"utf8mb4_roman_ci": 239,
|
||||
"utf8mb4_persian_ci": 240,
|
||||
"utf8mb4_esperanto_ci": 241,
|
||||
"utf8mb4_hungarian_ci": 242,
|
||||
"utf8mb4_sinhala_ci": 243,
|
||||
"utf8mb4_german2_ci": 244,
|
||||
"utf8mb4_croatian_ci": 245,
|
||||
"utf8mb4_unicode_520_ci": 246,
|
||||
"utf8mb4_vietnamese_ci": 247,
|
||||
}
|
||||
|
||||
// A blacklist of collations with which it is unsafe to interpolate parameters.
// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
var unsafeCollations = map[string]bool{
|
||||
"big5_chinese_ci": true,
|
||||
"sjis_japanese_ci": true,
|
||||
"gbk_chinese_ci": true,
|
||||
"big5_bin": true,
|
||||
"gb2312_bin": true,
|
||||
"gbk_bin": true,
|
||||
"sjis_bin": true,
|
||||
"cp932_japanese_ci": true,
|
||||
"cp932_bin": true,
|
||||
}
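This blacklist is why the DSN parser later refuses interpolateParams together with these collations: a trailing 0x5c byte could be misread as an escape during client-side quoting. A quick check of that behaviour through the exported ParseDSN; the DSNs here are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// gbk can contain 0x5c ('\') in trailing bytes, so client-side
	// interpolation is refused for it.
	_, err := mysql.ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
	fmt.Println(err) // invalid DSN: interpolateParams can not be used with unsafe collations

	// utf8 collations are safe to interpolate.
	cfg, err := mysql.ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Collation) // utf8_general_ci
}
```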
372 vendor/github.com/go-sql-driver/mysql/connection.go generated vendored Normal file
@ -0,0 +1,372 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type mysqlConn struct {
|
||||
buf buffer
|
||||
netConn net.Conn
|
||||
affectedRows uint64
|
||||
insertId uint64
|
||||
cfg *Config
|
||||
maxPacketAllowed int
|
||||
maxWriteSize int
|
||||
writeTimeout time.Duration
|
||||
flags clientFlag
|
||||
status statusFlag
|
||||
sequence uint8
|
||||
parseTime bool
|
||||
strict bool
|
||||
}
|
||||
|
||||
// Handles parameters set in DSN after the connection is established
|
||||
func (mc *mysqlConn) handleParams() (err error) {
|
||||
for param, val := range mc.cfg.Params {
|
||||
switch param {
|
||||
// Charset
|
||||
case "charset":
|
||||
charsets := strings.Split(val, ",")
|
||||
for i := range charsets {
|
||||
// ignore errors here - a charset may not exist
|
||||
err = mc.exec("SET NAMES " + charsets[i])
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// System Vars
|
||||
default:
|
||||
err = mc.exec("SET " + param + "=" + val + "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Begin() (driver.Tx, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
err := mc.exec("START TRANSACTION")
|
||||
if err == nil {
|
||||
return &mysqlTx{mc}, err
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Close() (err error) {
|
||||
// Makes Close idempotent
|
||||
if mc.netConn != nil {
|
||||
err = mc.writeCommandPacket(comQuit)
|
||||
}
|
||||
|
||||
mc.cleanup()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Closes the network connection and unsets internal variables. Do not call this
|
||||
// function after successfully authentication, call Close instead. This function
|
||||
// is called before auth or on auth failure because MySQL will have already
|
||||
// closed the network connection.
|
||||
func (mc *mysqlConn) cleanup() {
|
||||
// Makes cleanup idempotent
|
||||
if mc.netConn != nil {
|
||||
if err := mc.netConn.Close(); err != nil {
|
||||
errLog.Print(err)
|
||||
}
|
||||
mc.netConn = nil
|
||||
}
|
||||
mc.cfg = nil
|
||||
mc.buf.nc = nil
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comStmtPrepare, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stmt := &mysqlStmt{
|
||||
mc: mc,
|
||||
}
|
||||
|
||||
// Read Result
|
||||
columnCount, err := stmt.readPrepareResultPacket()
|
||||
if err == nil {
|
||||
if stmt.paramCount > 0 {
|
||||
if err = mc.readUntilEOF(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if columnCount > 0 {
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
}
|
||||
|
||||
return stmt, err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
|
||||
buf := mc.buf.takeCompleteBuffer()
|
||||
if buf == nil {
|
||||
// can not take the buffer. Something must be wrong with the connection
|
||||
errLog.Print(ErrBusyBuffer)
|
||||
return "", driver.ErrBadConn
|
||||
}
|
||||
buf = buf[:0]
|
||||
argPos := 0
|
||||
|
||||
for i := 0; i < len(query); i++ {
|
||||
q := strings.IndexByte(query[i:], '?')
|
||||
if q == -1 {
|
||||
buf = append(buf, query[i:]...)
|
||||
break
|
||||
}
|
||||
buf = append(buf, query[i:i+q]...)
|
||||
i += q
|
||||
|
||||
arg := args[argPos]
|
||||
argPos++
|
||||
|
||||
if arg == nil {
|
||||
buf = append(buf, "NULL"...)
|
||||
continue
|
||||
}
|
||||
|
||||
switch v := arg.(type) {
|
||||
case int64:
|
||||
buf = strconv.AppendInt(buf, v, 10)
|
||||
case float64:
|
||||
buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
|
||||
case bool:
|
||||
if v {
|
||||
buf = append(buf, '1')
|
||||
} else {
|
||||
buf = append(buf, '0')
|
||||
}
|
||||
case time.Time:
|
||||
if v.IsZero() {
|
||||
buf = append(buf, "'0000-00-00'"...)
|
||||
} else {
|
||||
v := v.In(mc.cfg.Loc)
|
||||
v = v.Add(time.Nanosecond * 500) // To round under microsecond
|
||||
year := v.Year()
|
||||
year100 := year / 100
|
||||
year1 := year % 100
|
||||
month := v.Month()
|
||||
day := v.Day()
|
||||
hour := v.Hour()
|
||||
minute := v.Minute()
|
||||
second := v.Second()
|
||||
micro := v.Nanosecond() / 1000
|
||||
|
||||
buf = append(buf, []byte{
|
||||
'\'',
|
||||
digits10[year100], digits01[year100],
|
||||
digits10[year1], digits01[year1],
|
||||
'-',
|
||||
digits10[month], digits01[month],
|
||||
'-',
|
||||
digits10[day], digits01[day],
|
||||
' ',
|
||||
digits10[hour], digits01[hour],
|
||||
':',
|
||||
digits10[minute], digits01[minute],
|
||||
':',
|
||||
digits10[second], digits01[second],
|
||||
}...)
|
||||
|
||||
if micro != 0 {
|
||||
micro10000 := micro / 10000
|
||||
micro100 := micro / 100 % 100
|
||||
micro1 := micro % 100
|
||||
buf = append(buf, []byte{
|
||||
'.',
|
||||
digits10[micro10000], digits01[micro10000],
|
||||
digits10[micro100], digits01[micro100],
|
||||
digits10[micro1], digits01[micro1],
|
||||
}...)
|
||||
}
|
||||
buf = append(buf, '\'')
|
||||
}
|
||||
case []byte:
|
||||
if v == nil {
|
||||
buf = append(buf, "NULL"...)
|
||||
} else {
|
||||
buf = append(buf, "_binary'"...)
|
||||
if mc.status&statusNoBackslashEscapes == 0 {
|
||||
buf = escapeBytesBackslash(buf, v)
|
||||
} else {
|
||||
buf = escapeBytesQuotes(buf, v)
|
||||
}
|
||||
buf = append(buf, '\'')
|
||||
}
|
||||
case string:
|
||||
buf = append(buf, '\'')
|
||||
if mc.status&statusNoBackslashEscapes == 0 {
|
||||
buf = escapeStringBackslash(buf, v)
|
||||
} else {
|
||||
buf = escapeStringQuotes(buf, v)
|
||||
}
|
||||
buf = append(buf, '\'')
|
||||
default:
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
|
||||
if len(buf)+4 > mc.maxPacketAllowed {
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
}
|
||||
if argPos != len(args) {
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(args) != 0 {
|
||||
if !mc.cfg.InterpolateParams {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
// try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
|
||||
prepared, err := mc.interpolateParams(query, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query = prepared
|
||||
args = nil
|
||||
}
|
||||
mc.affectedRows = 0
|
||||
mc.insertId = 0
|
||||
|
||||
err := mc.exec(query)
|
||||
if err == nil {
|
||||
return &mysqlResult{
|
||||
affectedRows: int64(mc.affectedRows),
|
||||
insertId: int64(mc.insertId),
|
||||
}, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Internal function to execute commands
|
||||
func (mc *mysqlConn) exec(query string) error {
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comQuery, query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil && resLen > 0 {
|
||||
if err = mc.readUntilEOF(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(args) != 0 {
|
||||
if !mc.cfg.InterpolateParams {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
// try client-side prepare to reduce roundtrip
|
||||
prepared, err := mc.interpolateParams(query, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query = prepared
|
||||
args = nil
|
||||
}
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comQuery, query)
|
||||
if err == nil {
|
||||
// Read Result
|
||||
var resLen int
|
||||
resLen, err = mc.readResultSetHeaderPacket()
|
||||
if err == nil {
|
||||
rows := new(textRows)
|
||||
rows.mc = mc
|
||||
|
||||
if resLen == 0 {
|
||||
// no columns, no more data
|
||||
return emptyRows{}, nil
|
||||
}
|
||||
// Columns
|
||||
rows.columns, err = mc.readColumns(resLen)
|
||||
return rows, err
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Gets the value of the given MySQL System Variable
|
||||
// The returned byte slice is only valid until the next read
|
||||
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
|
||||
// Send command
|
||||
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil {
|
||||
rows := new(textRows)
|
||||
rows.mc = mc
|
||||
rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
|
||||
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
if err := mc.readUntilEOF(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
dest := make([]driver.Value, resLen)
|
||||
if err = rows.readRow(dest); err == nil {
|
||||
return dest[0].([]byte), mc.readUntilEOF()
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
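The Exec and Query paths above interpolate parameters client-side only when the DSN enables it, which avoids the prepare/execute/close round trips for one-off statements. A hedged usage sketch; credentials, address and table name are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// interpolateParams=true lets Exec/Query splice the arguments into the
	// query string on the client instead of using a server-side prepare.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?interpolateParams=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Sent as a single COM_QUERY packet; no COM_STMT_PREPARE round trip.
	if _, err := db.Exec("UPDATE t SET n = ? WHERE id = ?", 42, 7); err != nil {
		log.Fatal(err)
	}
}
```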
163 vendor/github.com/go-sql-driver/mysql/const.go generated vendored Normal file
@ -0,0 +1,163 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
const (
|
||||
minProtocolVersion byte = 10
|
||||
maxPacketSize = 1<<24 - 1
|
||||
timeFormat = "2006-01-02 15:04:05.999999"
|
||||
)
|
||||
|
||||
// MySQL constants documentation:
|
||||
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
|
||||
|
||||
const (
|
||||
iOK byte = 0x00
|
||||
iLocalInFile byte = 0xfb
|
||||
iEOF byte = 0xfe
|
||||
iERR byte = 0xff
|
||||
)
|
||||
|
||||
// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
|
||||
type clientFlag uint32
|
||||
|
||||
const (
|
||||
clientLongPassword clientFlag = 1 << iota
|
||||
clientFoundRows
|
||||
clientLongFlag
|
||||
clientConnectWithDB
|
||||
clientNoSchema
|
||||
clientCompress
|
||||
clientODBC
|
||||
clientLocalFiles
|
||||
clientIgnoreSpace
|
||||
clientProtocol41
|
||||
clientInteractive
|
||||
clientSSL
|
||||
clientIgnoreSIGPIPE
|
||||
clientTransactions
|
||||
clientReserved
|
||||
clientSecureConn
|
||||
clientMultiStatements
|
||||
clientMultiResults
|
||||
clientPSMultiResults
|
||||
clientPluginAuth
|
||||
clientConnectAttrs
|
||||
clientPluginAuthLenEncClientData
|
||||
clientCanHandleExpiredPasswords
|
||||
clientSessionTrack
|
||||
clientDeprecateEOF
|
||||
)
|
||||
|
||||
const (
|
||||
comQuit byte = iota + 1
|
||||
comInitDB
|
||||
comQuery
|
||||
comFieldList
|
||||
comCreateDB
|
||||
comDropDB
|
||||
comRefresh
|
||||
comShutdown
|
||||
comStatistics
|
||||
comProcessInfo
|
||||
comConnect
|
||||
comProcessKill
|
||||
comDebug
|
||||
comPing
|
||||
comTime
|
||||
comDelayedInsert
|
||||
comChangeUser
|
||||
comBinlogDump
|
||||
comTableDump
|
||||
comConnectOut
|
||||
comRegisterSlave
|
||||
comStmtPrepare
|
||||
comStmtExecute
|
||||
comStmtSendLongData
|
||||
comStmtClose
|
||||
comStmtReset
|
||||
comSetOption
|
||||
comStmtFetch
|
||||
)
|
||||
|
||||
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
|
||||
const (
|
||||
fieldTypeDecimal byte = iota
|
||||
fieldTypeTiny
|
||||
fieldTypeShort
|
||||
fieldTypeLong
|
||||
fieldTypeFloat
|
||||
fieldTypeDouble
|
||||
fieldTypeNULL
|
||||
fieldTypeTimestamp
|
||||
fieldTypeLongLong
|
||||
fieldTypeInt24
|
||||
fieldTypeDate
|
||||
fieldTypeTime
|
||||
fieldTypeDateTime
|
||||
fieldTypeYear
|
||||
fieldTypeNewDate
|
||||
fieldTypeVarChar
|
||||
fieldTypeBit
|
||||
)
|
||||
const (
|
||||
fieldTypeJSON byte = iota + 0xf5
|
||||
fieldTypeNewDecimal
|
||||
fieldTypeEnum
|
||||
fieldTypeSet
|
||||
fieldTypeTinyBLOB
|
||||
fieldTypeMediumBLOB
|
||||
fieldTypeLongBLOB
|
||||
fieldTypeBLOB
|
||||
fieldTypeVarString
|
||||
fieldTypeString
|
||||
fieldTypeGeometry
|
||||
)
|
||||
|
||||
type fieldFlag uint16
|
||||
|
||||
const (
|
||||
flagNotNULL fieldFlag = 1 << iota
|
||||
flagPriKey
|
||||
flagUniqueKey
|
||||
flagMultipleKey
|
||||
flagBLOB
|
||||
flagUnsigned
|
||||
flagZeroFill
|
||||
flagBinary
|
||||
flagEnum
|
||||
flagAutoIncrement
|
||||
flagTimestamp
|
||||
flagSet
|
||||
flagUnknown1
|
||||
flagUnknown2
|
||||
flagUnknown3
|
||||
flagUnknown4
|
||||
)
|
||||
|
||||
// http://dev.mysql.com/doc/internals/en/status-flags.html
|
||||
type statusFlag uint16
|
||||
|
||||
const (
|
||||
statusInTrans statusFlag = 1 << iota
|
||||
statusInAutocommit
|
||||
statusReserved // Not in documentation
|
||||
statusMoreResultsExists
|
||||
statusNoGoodIndexUsed
|
||||
statusNoIndexUsed
|
||||
statusCursorExists
|
||||
statusLastRowSent
|
||||
statusDbDropped
|
||||
statusNoBackslashEscapes
|
||||
statusMetadataChanged
|
||||
statusQueryWasSlow
|
||||
statusPsOutParams
|
||||
statusInTransReadonly
|
||||
statusSessionStateChanged
|
||||
)
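The capability, field and status flags above are plain bitmasks built with iota shifts and tested with bitwise AND, as in `mc.status&statusNoBackslashEscapes` earlier. A small standalone illustration of the same pattern, reusing a few of the names for familiarity:

```go
package main

import "fmt"

type clientFlag uint32

const (
	clientLongPassword clientFlag = 1 << iota
	clientFoundRows
	clientLongFlag
	clientConnectWithDB
)

func main() {
	// Flags advertised by a hypothetical handshake.
	flags := clientLongPassword | clientConnectWithDB

	fmt.Println(flags&clientConnectWithDB != 0) // true
	fmt.Println(flags&clientFoundRows != 0)     // false
}
```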
167 vendor/github.com/go-sql-driver/mysql/driver.go generated vendored Normal file
@ -0,0 +1,167 @@
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package mysql provides a MySQL driver for Go's database/sql package
|
||||
//
|
||||
// The driver should be used via the database/sql package:
|
||||
//
|
||||
// import "database/sql"
|
||||
// import _ "github.com/go-sql-driver/mysql"
|
||||
//
|
||||
// db, err := sql.Open("mysql", "user:password@/dbname")
|
||||
//
|
||||
// See https://github.com/go-sql-driver/mysql#usage for details
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"net"
|
||||
)
|
||||
|
||||
// MySQLDriver is exported to make the driver directly accessible.
|
||||
// In general the driver is used via the database/sql package.
|
||||
type MySQLDriver struct{}
|
||||
|
||||
// DialFunc is a function which can be used to establish the network connection.
|
||||
// Custom dial functions must be registered with RegisterDial
|
||||
type DialFunc func(addr string) (net.Conn, error)
|
||||
|
||||
var dials map[string]DialFunc
|
||||
|
||||
// RegisterDial registers a custom dial function. It can then be used by the
|
||||
// network address mynet(addr), where mynet is the registered new network.
|
||||
// addr is passed as a parameter to the dial function.
|
||||
func RegisterDial(net string, dial DialFunc) {
|
||||
if dials == nil {
|
||||
dials = make(map[string]DialFunc)
|
||||
}
|
||||
dials[net] = dial
|
||||
}
|
||||
|
||||
// Open new Connection.
|
||||
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
// the DSN string is formatted
|
||||
func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
|
||||
var err error
|
||||
|
||||
// New mysqlConn
|
||||
mc := &mysqlConn{
|
||||
maxPacketAllowed: maxPacketSize,
|
||||
maxWriteSize: maxPacketSize - 1,
|
||||
}
|
||||
mc.cfg, err = ParseDSN(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mc.parseTime = mc.cfg.ParseTime
|
||||
mc.strict = mc.cfg.Strict
|
||||
|
||||
// Connect to Server
|
||||
if dial, ok := dials[mc.cfg.Net]; ok {
|
||||
mc.netConn, err = dial(mc.cfg.Addr)
|
||||
} else {
|
||||
nd := net.Dialer{Timeout: mc.cfg.Timeout}
|
||||
mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Enable TCP Keepalives on TCP connections
|
||||
if tc, ok := mc.netConn.(*net.TCPConn); ok {
|
||||
if err := tc.SetKeepAlive(true); err != nil {
|
||||
// Don't send COM_QUIT before handshake.
|
||||
mc.netConn.Close()
|
||||
mc.netConn = nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
mc.buf = newBuffer(mc.netConn)
|
||||
|
||||
// Set I/O timeouts
|
||||
mc.buf.timeout = mc.cfg.ReadTimeout
|
||||
mc.writeTimeout = mc.cfg.WriteTimeout
|
||||
|
||||
// Reading Handshake Initialization Packet
|
||||
cipher, err := mc.readInitPacket()
|
||||
if err != nil {
|
||||
mc.cleanup()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send Client Authentication Packet
|
||||
if err = mc.writeAuthPacket(cipher); err != nil {
|
||||
mc.cleanup()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Handle response to auth packet, switch methods if possible
|
||||
if err = handleAuthResult(mc, cipher); err != nil {
|
||||
// Authentication failed and MySQL has already closed the connection
|
||||
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
|
||||
// Do not send COM_QUIT, just cleanup and return the error.
|
||||
mc.cleanup()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get max allowed packet size
|
||||
maxap, err := mc.getSystemVar("max_allowed_packet")
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
mc.maxPacketAllowed = stringToInt(maxap) - 1
|
||||
if mc.maxPacketAllowed < maxPacketSize {
|
||||
mc.maxWriteSize = mc.maxPacketAllowed
|
||||
}
|
||||
|
||||
// Handle DSN Params
|
||||
err = mc.handleParams()
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mc, nil
|
||||
}
|
||||
|
||||
func handleAuthResult(mc *mysqlConn, cipher []byte) error {
|
||||
// Read Result Packet
|
||||
err := mc.readResultOK()
|
||||
if err == nil {
|
||||
return nil // auth successful
|
||||
}
|
||||
|
||||
if mc.cfg == nil {
|
||||
return err // auth failed and retry not possible
|
||||
}
|
||||
|
||||
// Retry auth if configured to do so.
|
||||
if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
|
||||
// Retry with old authentication method. Note: there are edge cases
|
||||
// where this should work but doesn't; this is currently "wontfix":
|
||||
// https://github.com/go-sql-driver/mysql/issues/184
|
||||
if err = mc.writeOldAuthPacket(cipher); err != nil {
|
||||
return err
|
||||
}
|
||||
err = mc.readResultOK()
|
||||
} else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
|
||||
// Retry with clear text password for
|
||||
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
|
||||
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
|
||||
if err = mc.writeClearAuthPacket(); err != nil {
|
||||
return err
|
||||
}
|
||||
err = mc.readResultOK()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func init() {
|
||||
sql.Register("mysql", &MySQLDriver{})
|
||||
}
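Besides registering the driver with database/sql in init, driver.go lets callers plug in a custom dialer via RegisterDial and then reference it by name in the DSN. A usage sketch; the network name, address and credentials below are placeholders:

```go
package main

import (
	"database/sql"
	"log"
	"net"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Register a custom "network" that dials TCP with a fixed timeout.
	mysql.RegisterDial("mynet", func(addr string) (net.Conn, error) {
		return net.DialTimeout("tcp", addr, 3*time.Second)
	})

	// The registered name replaces tcp/unix in the DSN.
	db, err := sql.Open("mysql", "user:password@mynet(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```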
1857 vendor/github.com/go-sql-driver/mysql/driver_test.go generated vendored Normal file
File diff suppressed because it is too large
513 vendor/github.com/go-sql-driver/mysql/dsn.go generated vendored Normal file
@ -0,0 +1,513 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
|
||||
errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
|
||||
errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
|
||||
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
|
||||
)
|
||||
|
||||
// Config is a configuration parsed from a DSN string
|
||||
type Config struct {
|
||||
User string // Username
|
||||
Passwd string // Password (requires User)
|
||||
Net string // Network type
|
||||
Addr string // Network address (requires Net)
|
||||
DBName string // Database name
|
||||
Params map[string]string // Connection parameters
|
||||
Collation string // Connection collation
|
||||
Loc *time.Location // Location for time.Time values
|
||||
TLSConfig string // TLS configuration name
|
||||
tls *tls.Config // TLS configuration
|
||||
Timeout time.Duration // Dial timeout
|
||||
ReadTimeout time.Duration // I/O read timeout
|
||||
WriteTimeout time.Duration // I/O write timeout
|
||||
|
||||
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
|
||||
AllowCleartextPasswords bool // Allows the cleartext client side plugin
|
||||
AllowOldPasswords bool // Allows the old insecure password method
|
||||
ClientFoundRows bool // Return number of matching rows instead of rows changed
|
||||
ColumnsWithAlias bool // Prepend table alias to column names
|
||||
InterpolateParams bool // Interpolate placeholders into query string
|
||||
MultiStatements bool // Allow multiple statements in one query
|
||||
ParseTime bool // Parse time values to time.Time
|
||||
Strict bool // Return warnings as errors
|
||||
}
|
||||
|
||||
// FormatDSN formats the given Config into a DSN string which can be passed to
|
||||
// the driver.
|
||||
func (cfg *Config) FormatDSN() string {
|
||||
var buf bytes.Buffer
|
||||
|
||||
// [username[:password]@]
|
||||
if len(cfg.User) > 0 {
|
||||
buf.WriteString(cfg.User)
|
||||
if len(cfg.Passwd) > 0 {
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(cfg.Passwd)
|
||||
}
|
||||
buf.WriteByte('@')
|
||||
}
|
||||
|
||||
// [protocol[(address)]]
|
||||
if len(cfg.Net) > 0 {
|
||||
buf.WriteString(cfg.Net)
|
||||
if len(cfg.Addr) > 0 {
|
||||
buf.WriteByte('(')
|
||||
buf.WriteString(cfg.Addr)
|
||||
buf.WriteByte(')')
|
||||
}
|
||||
}
|
||||
|
||||
// /dbname
|
||||
buf.WriteByte('/')
|
||||
buf.WriteString(cfg.DBName)
|
||||
|
||||
// [?param1=value1&...¶mN=valueN]
|
||||
hasParam := false
|
||||
|
||||
if cfg.AllowAllFiles {
|
||||
hasParam = true
|
||||
buf.WriteString("?allowAllFiles=true")
|
||||
}
|
||||
|
||||
if cfg.AllowCleartextPasswords {
|
||||
if hasParam {
|
||||
buf.WriteString("&allowCleartextPasswords=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?allowCleartextPasswords=true")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.AllowOldPasswords {
|
||||
if hasParam {
|
||||
buf.WriteString("&allowOldPasswords=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?allowOldPasswords=true")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.ClientFoundRows {
|
||||
if hasParam {
|
||||
buf.WriteString("&clientFoundRows=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?clientFoundRows=true")
|
||||
}
|
||||
}
|
||||
|
||||
if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
|
||||
if hasParam {
|
||||
buf.WriteString("&collation=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?collation=")
|
||||
}
|
||||
buf.WriteString(col)
|
||||
}
|
||||
|
||||
if cfg.ColumnsWithAlias {
|
||||
if hasParam {
|
||||
buf.WriteString("&columnsWithAlias=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?columnsWithAlias=true")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.InterpolateParams {
|
||||
if hasParam {
|
||||
buf.WriteString("&interpolateParams=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?interpolateParams=true")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Loc != time.UTC && cfg.Loc != nil {
|
||||
if hasParam {
|
||||
buf.WriteString("&loc=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?loc=")
|
||||
}
|
||||
buf.WriteString(url.QueryEscape(cfg.Loc.String()))
|
||||
}
|
||||
|
||||
if cfg.MultiStatements {
|
||||
if hasParam {
|
||||
buf.WriteString("&multiStatements=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?multiStatements=true")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.ParseTime {
|
||||
if hasParam {
|
||||
buf.WriteString("&parseTime=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?parseTime=true")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.ReadTimeout > 0 {
|
||||
if hasParam {
|
||||
buf.WriteString("&readTimeout=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?readTimeout=")
|
||||
}
|
||||
buf.WriteString(cfg.ReadTimeout.String())
|
||||
}
|
||||
|
||||
if cfg.Strict {
|
||||
if hasParam {
|
||||
buf.WriteString("&strict=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?strict=true")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Timeout > 0 {
|
||||
if hasParam {
|
||||
buf.WriteString("&timeout=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?timeout=")
|
||||
}
|
||||
buf.WriteString(cfg.Timeout.String())
|
||||
}
|
||||
|
||||
if len(cfg.TLSConfig) > 0 {
|
||||
if hasParam {
|
||||
buf.WriteString("&tls=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?tls=")
|
||||
}
|
||||
buf.WriteString(url.QueryEscape(cfg.TLSConfig))
|
||||
}
|
||||
|
||||
if cfg.WriteTimeout > 0 {
|
||||
if hasParam {
|
||||
buf.WriteString("&writeTimeout=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?writeTimeout=")
|
||||
}
|
||||
buf.WriteString(cfg.WriteTimeout.String())
|
||||
}
|
||||
|
||||
// other params
|
||||
if cfg.Params != nil {
|
||||
for param, value := range cfg.Params {
|
||||
if hasParam {
|
||||
buf.WriteByte('&')
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteByte('?')
|
||||
}
|
||||
|
||||
buf.WriteString(param)
|
||||
buf.WriteByte('=')
|
||||
buf.WriteString(url.QueryEscape(value))
|
||||
}
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// ParseDSN parses the DSN string to a Config
|
||||
func ParseDSN(dsn string) (cfg *Config, err error) {
|
||||
// New config with some default values
|
||||
cfg = &Config{
|
||||
Loc: time.UTC,
|
||||
Collation: defaultCollation,
|
||||
}
|
||||
|
||||
// [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
|
||||
// Find the last '/' (since the password or the net addr might contain a '/')
|
||||
foundSlash := false
|
||||
for i := len(dsn) - 1; i >= 0; i-- {
|
||||
if dsn[i] == '/' {
|
||||
foundSlash = true
|
||||
var j, k int
|
||||
|
||||
// left part is empty if i <= 0
|
||||
if i > 0 {
|
||||
// [username[:password]@][protocol[(address)]]
|
||||
// Find the last '@' in dsn[:i]
|
||||
for j = i; j >= 0; j-- {
|
||||
if dsn[j] == '@' {
|
||||
// username[:password]
|
||||
// Find the first ':' in dsn[:j]
|
||||
for k = 0; k < j; k++ {
|
||||
if dsn[k] == ':' {
|
||||
cfg.Passwd = dsn[k+1 : j]
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.User = dsn[:k]
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// [protocol[(address)]]
|
||||
// Find the first '(' in dsn[j+1:i]
|
||||
for k = j + 1; k < i; k++ {
|
||||
if dsn[k] == '(' {
|
||||
// dsn[i-1] must be == ')' if an address is specified
|
||||
if dsn[i-1] != ')' {
|
||||
if strings.ContainsRune(dsn[k+1:i], ')') {
|
||||
return nil, errInvalidDSNUnescaped
|
||||
}
|
||||
return nil, errInvalidDSNAddr
|
||||
}
|
||||
cfg.Addr = dsn[k+1 : i-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.Net = dsn[j+1 : k]
|
||||
}
|
||||
|
||||
// dbname[?param1=value1&...¶mN=valueN]
|
||||
// Find the first '?' in dsn[i+1:]
|
||||
for j = i + 1; j < len(dsn); j++ {
|
||||
if dsn[j] == '?' {
|
||||
if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.DBName = dsn[i+1 : j]
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundSlash && len(dsn) > 0 {
|
||||
return nil, errInvalidDSNNoSlash
|
||||
}
|
||||
|
||||
if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
|
||||
return nil, errInvalidDSNUnsafeCollation
|
||||
}
|
||||
|
||||
// Set default network if empty
|
||||
if cfg.Net == "" {
|
||||
cfg.Net = "tcp"
|
||||
}
|
||||
|
||||
// Set default address if empty
|
||||
if cfg.Addr == "" {
|
||||
switch cfg.Net {
|
||||
case "tcp":
|
||||
cfg.Addr = "127.0.0.1:3306"
|
||||
case "unix":
|
||||
cfg.Addr = "/tmp/mysql.sock"
|
||||
default:
|
||||
return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseDSNParams parses the DSN "query string"
|
||||
// Values must be url.QueryEscape'ed
|
||||
func parseDSNParams(cfg *Config, params string) (err error) {
|
||||
for _, v := range strings.Split(params, "&") {
|
||||
param := strings.SplitN(v, "=", 2)
|
||||
if len(param) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
// cfg params
|
||||
switch value := param[1]; param[0] {
|
||||
|
||||
// Disable INFILE whitelist / enable all files
|
||||
case "allowAllFiles":
|
||||
var isBool bool
|
||||
cfg.AllowAllFiles, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Use cleartext authentication mode (MySQL 5.5.10+)
|
||||
case "allowCleartextPasswords":
|
||||
var isBool bool
|
||||
cfg.AllowCleartextPasswords, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Use old authentication mode (pre MySQL 4.1)
|
||||
case "allowOldPasswords":
|
||||
var isBool bool
|
||||
cfg.AllowOldPasswords, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Switch "rowsAffected" mode
|
||||
case "clientFoundRows":
|
||||
var isBool bool
|
||||
cfg.ClientFoundRows, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Collation
|
||||
case "collation":
|
||||
cfg.Collation = value
|
||||
break
|
||||
|
||||
case "columnsWithAlias":
|
||||
var isBool bool
|
||||
cfg.ColumnsWithAlias, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Compression
|
||||
case "compress":
|
||||
return errors.New("compression not implemented yet")
|
||||
|
||||
// Enable client side placeholder substitution
|
||||
case "interpolateParams":
|
||||
var isBool bool
|
||||
cfg.InterpolateParams, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Time Location
|
||||
case "loc":
|
||||
if value, err = url.QueryUnescape(value); err != nil {
|
||||
return
|
||||
}
|
||||
cfg.Loc, err = time.LoadLocation(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// multiple statements in one query
|
||||
case "multiStatements":
|
||||
var isBool bool
|
||||
cfg.MultiStatements, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// time.Time parsing
|
||||
case "parseTime":
|
||||
var isBool bool
|
||||
cfg.ParseTime, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// I/O read Timeout
|
||||
case "readTimeout":
|
||||
cfg.ReadTimeout, err = time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Strict mode
|
||||
case "strict":
|
||||
var isBool bool
|
||||
cfg.Strict, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Dial Timeout
|
||||
case "timeout":
|
||||
cfg.Timeout, err = time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// TLS-Encryption
|
||||
case "tls":
|
||||
boolValue, isBool := readBool(value)
|
||||
if isBool {
|
||||
if boolValue {
|
||||
cfg.TLSConfig = "true"
|
||||
cfg.tls = &tls.Config{}
|
||||
} else {
|
||||
cfg.TLSConfig = "false"
|
||||
}
|
||||
} else if vl := strings.ToLower(value); vl == "skip-verify" {
|
||||
cfg.TLSConfig = vl
|
||||
cfg.tls = &tls.Config{InsecureSkipVerify: true}
|
||||
} else {
|
||||
name, err := url.QueryUnescape(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for TLS config name: %v", err)
|
||||
}
|
||||
|
||||
if tlsConfig, ok := tlsConfigRegister[name]; ok {
|
||||
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
|
||||
host, _, err := net.SplitHostPort(cfg.Addr)
|
||||
if err == nil {
|
||||
tlsConfig.ServerName = host
|
||||
}
|
||||
}
|
||||
|
||||
cfg.TLSConfig = name
|
||||
cfg.tls = tlsConfig
|
||||
} else {
|
||||
return errors.New("invalid value / unknown config name: " + name)
|
||||
}
|
||||
}
|
||||
|
||||
// I/O write Timeout
|
||||
case "writeTimeout":
|
||||
cfg.WriteTimeout, err = time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
// lazy init
|
||||
if cfg.Params == nil {
|
||||
cfg.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
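FormatDSN and ParseDSN are intended to round-trip the Config fields shown above (the tests in dsn_test.go below exercise exactly this). A short sketch of that round trip; the values are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := &mysql.Config{
		User:      "user",
		Passwd:    "password",
		Net:       "tcp",
		Addr:      "127.0.0.1:3306",
		DBName:    "dbname",
		Collation: "utf8_general_ci",
		Loc:       time.UTC,
		ParseTime: true,
	}

	dsn := cfg.FormatDSN()
	fmt.Println(dsn) // user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true

	back, err := mysql.ParseDSN(dsn)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.Addr, back.ParseTime) // 127.0.0.1:3306 true
}
```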
231 vendor/github.com/go-sql-driver/mysql/dsn_test.go generated vendored Normal file
@ -0,0 +1,231 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var testDSNs = []struct {
|
||||
in string
|
||||
out *Config
|
||||
}{{
|
||||
"username:password@protocol(address)/dbname?param=value",
|
||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}, {
|
||||
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
|
||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true},
|
||||
}, {
|
||||
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
|
||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true, MultiStatements: true},
|
||||
}, {
|
||||
"user@unix(/path/to/socket)/dbname?charset=utf8",
|
||||
&Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}, {
|
||||
"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
|
||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "true"},
|
||||
}, {
|
||||
"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
|
||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "skip-verify"},
|
||||
}, {
|
||||
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci",
|
||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true},
|
||||
}, {
|
||||
"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
|
||||
&Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local},
|
||||
}, {
|
||||
"/dbname",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}, {
|
||||
"@/",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}, {
|
||||
"/",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}, {
|
||||
"",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}, {
|
||||
"user:p@/ssword@/",
|
||||
&Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}, {
|
||||
"unix/?arg=%2Fsome%2Fpath.ext",
|
||||
&Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC},
|
||||
}}
|
||||
|
||||
func TestDSNParser(t *testing.T) {
|
||||
for i, tst := range testDSNs {
|
||||
cfg, err := ParseDSN(tst.in)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
}
|
||||
|
||||
// pointer not static
|
||||
cfg.tls = nil
|
||||
|
||||
if !reflect.DeepEqual(cfg, tst.out) {
|
||||
t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNParserInvalid(t *testing.T) {
|
||||
var invalidDSNs = []string{
|
||||
"@net(addr/", // no closing brace
|
||||
"@tcp(/", // no closing brace
|
||||
"tcp(/", // no closing brace
|
||||
"(/", // no closing brace
|
||||
"net(addr)//", // unescaped
|
||||
"User:pass@tcp(1.2.3.4:3306)", // no trailing slash
|
||||
//"/dbname?arg=/some/unescaped/path",
|
||||
}
|
||||
|
||||
for i, tst := range invalidDSNs {
|
||||
if _, err := ParseDSN(tst); err == nil {
|
||||
t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNReformat(t *testing.T) {
|
||||
for i, tst := range testDSNs {
|
||||
dsn1 := tst.in
|
||||
cfg1, err := ParseDSN(dsn1)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
continue
|
||||
}
|
||||
cfg1.tls = nil // pointer not static
|
||||
res1 := fmt.Sprintf("%+v", cfg1)
|
||||
|
||||
dsn2 := cfg1.FormatDSN()
|
||||
cfg2, err := ParseDSN(dsn2)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
continue
|
||||
}
|
||||
cfg2.tls = nil // pointer not static
|
||||
res2 := fmt.Sprintf("%+v", cfg2)
|
||||
|
||||
if res1 != res2 {
|
||||
t.Errorf("%d. %q does not match %q", i, res2, res1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNWithCustomTLS(t *testing.T) {
|
||||
baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
|
||||
tlsCfg := tls.Config{}
|
||||
|
||||
RegisterTLSConfig("utils_test", &tlsCfg)
|
||||
|
||||
// Custom TLS is missing
|
||||
tst := baseDSN + "invalid_tls"
|
||||
cfg, err := ParseDSN(tst)
|
||||
if err == nil {
|
||||
t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
|
||||
}
|
||||
|
||||
tst = baseDSN + "utils_test"
|
||||
|
||||
// Custom TLS with a server name
|
||||
name := "foohost"
|
||||
tlsCfg.ServerName = name
|
||||
cfg, err = ParseDSN(tst)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
} else if cfg.tls.ServerName != name {
|
||||
t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
|
||||
}
|
||||
|
||||
// Custom TLS without a server name
|
||||
name = "localhost"
|
||||
tlsCfg.ServerName = ""
|
||||
cfg, err = ParseDSN(tst)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
} else if cfg.tls.ServerName != name {
|
||||
t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
|
||||
}
|
||||
|
||||
DeregisterTLSConfig("utils_test")
|
||||
}
|
||||
|
||||
func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
|
||||
const configKey = "&%!:"
|
||||
dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
|
||||
name := "foohost"
|
||||
tlsCfg := tls.Config{ServerName: name}
|
||||
|
||||
RegisterTLSConfig(configKey, &tlsCfg)
|
||||
|
||||
cfg, err := ParseDSN(dsn)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
} else if cfg.tls.ServerName != name {
|
||||
t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNUnsafeCollation(t *testing.T) {
|
||||
_, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
|
||||
if err != errInvalidDSNUnsafeCollation {
|
||||
t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseDSN(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for _, tst := range testDSNs {
|
||||
if _, err := ParseDSN(tst.in); err != nil {
|
||||
b.Error(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
131 vendor/github.com/go-sql-driver/mysql/errors.go generated vendored Normal file
@ -0,0 +1,131 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Various errors the driver might return. Can change between driver versions.
|
||||
var (
|
||||
ErrInvalidConn = errors.New("invalid connection")
|
||||
ErrMalformPkt = errors.New("malformed packet")
|
||||
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
|
||||
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
|
||||
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
|
||||
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
|
||||
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
|
||||
ErrPktSync = errors.New("commands out of sync. You can't run this command now")
|
||||
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
|
||||
ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
|
||||
ErrBusyBuffer = errors.New("busy buffer")
|
||||
)
|
||||
|
||||
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
|
||||
|
||||
// Logger is used to log critical error messages.
|
||||
type Logger interface {
|
||||
Print(v ...interface{})
|
||||
}
|
||||
|
||||
// SetLogger is used to set the logger for critical errors.
|
||||
// The initial logger is os.Stderr.
|
||||
func SetLogger(logger Logger) error {
|
||||
if logger == nil {
|
||||
return errors.New("logger is nil")
|
||||
}
|
||||
errLog = logger
|
||||
return nil
|
||||
}
|
||||
|
||||
// MySQLError is an error type which represents a single MySQL error
|
||||
type MySQLError struct {
|
||||
Number uint16
|
||||
Message string
|
||||
}
|
||||
|
||||
func (me *MySQLError) Error() string {
|
||||
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
|
||||
}
|
||||
|
||||
// MySQLWarnings is an error type which represents a group of one or more MySQL
|
||||
// warnings
|
||||
type MySQLWarnings []MySQLWarning
|
||||
|
||||
func (mws MySQLWarnings) Error() string {
|
||||
var msg string
|
||||
for i, warning := range mws {
|
||||
if i > 0 {
|
||||
msg += "\r\n"
|
||||
}
|
||||
msg += fmt.Sprintf(
|
||||
"%s %s: %s",
|
||||
warning.Level,
|
||||
warning.Code,
|
||||
warning.Message,
|
||||
)
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// MySQLWarning is an error type which represents a single MySQL warning.
|
||||
// Warnings are returned in groups only. See MySQLWarnings
|
||||
type MySQLWarning struct {
|
||||
Level string
|
||||
Code string
|
||||
Message string
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) getWarnings() (err error) {
|
||||
rows, err := mc.Query("SHOW WARNINGS", nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var warnings = MySQLWarnings{}
|
||||
var values = make([]driver.Value, 3)
|
||||
|
||||
for {
|
||||
err = rows.Next(values)
|
||||
switch err {
|
||||
case nil:
|
||||
warning := MySQLWarning{}
|
||||
|
||||
if raw, ok := values[0].([]byte); ok {
|
||||
warning.Level = string(raw)
|
||||
} else {
|
||||
warning.Level = fmt.Sprintf("%s", values[0])
|
||||
}
|
||||
if raw, ok := values[1].([]byte); ok {
|
||||
warning.Code = string(raw)
|
||||
} else {
|
||||
warning.Code = fmt.Sprintf("%s", values[1])
|
||||
}
|
||||
if raw, ok := values[2].([]byte); ok {
|
||||
warning.Message = string(raw)
|
||||
} else {
|
||||
warning.Message = fmt.Sprintf("%s", values[0])
|
||||
}
|
||||
|
||||
warnings = append(warnings, warning)
|
||||
|
||||
case io.EOF:
|
||||
return warnings
|
||||
|
||||
default:
|
||||
rows.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
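Critical driver errors go through the package-level Logger above, which callers can swap out with SetLogger. A minimal sketch:

```go
package main

import (
	"log"
	"os"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Route the driver's critical error messages through a custom logger;
	// *log.Logger satisfies the driver's Logger interface via Print.
	logger := log.New(os.Stdout, "[mysql-driver] ", log.LstdFlags)
	if err := mysql.SetLogger(logger); err != nil {
		log.Fatal(err)
	}
}
```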
42 vendor/github.com/go-sql-driver/mysql/errors_test.go generated vendored Normal file
@ -0,0 +1,42 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

import (
	"bytes"
	"log"
	"testing"
)

func TestErrorsSetLogger(t *testing.T) {
	previous := errLog
	defer func() {
		errLog = previous
	}()

	// set up logger
	const expected = "prefix: test\n"
	buffer := bytes.NewBuffer(make([]byte, 0, 64))
	logger := log.New(buffer, "prefix: ", 0)

	// print
	SetLogger(logger)
	errLog.Print("test")

	// check result
	if actual := buffer.String(); actual != expected {
		t.Errorf("expected %q, got %q", expected, actual)
	}
}

func TestErrorsStrictIgnoreNotes(t *testing.T) {
	runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
		dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
	})
}
|
181 vendor/github.com/go-sql-driver/mysql/infile.go (generated, vendored, new file)
@@ -0,0 +1,181 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
fileRegister map[string]bool
|
||||
fileRegisterLock sync.RWMutex
|
||||
readerRegister map[string]func() io.Reader
|
||||
readerRegisterLock sync.RWMutex
|
||||
)
|
||||
|
||||
// RegisterLocalFile adds the given file to the file whitelist,
|
||||
// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
|
||||
// Alternatively you can allow the use of all local files with
|
||||
// the DSN parameter 'allowAllFiles=true'
|
||||
//
|
||||
// filePath := "/home/gopher/data.csv"
|
||||
// mysql.RegisterLocalFile(filePath)
|
||||
// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
|
||||
// if err != nil {
|
||||
// ...
|
||||
//
|
||||
func RegisterLocalFile(filePath string) {
|
||||
fileRegisterLock.Lock()
|
||||
// lazy map init
|
||||
if fileRegister == nil {
|
||||
fileRegister = make(map[string]bool)
|
||||
}
|
||||
|
||||
fileRegister[strings.Trim(filePath, `"`)] = true
|
||||
fileRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
// DeregisterLocalFile removes the given filepath from the whitelist.
|
||||
func DeregisterLocalFile(filePath string) {
|
||||
fileRegisterLock.Lock()
|
||||
delete(fileRegister, strings.Trim(filePath, `"`))
|
||||
fileRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
// RegisterReaderHandler registers a handler function which is used
|
||||
// to receive a io.Reader.
|
||||
// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
|
||||
// If the handler returns a io.ReadCloser Close() is called when the
|
||||
// request is finished.
|
||||
//
|
||||
// mysql.RegisterReaderHandler("data", func() io.Reader {
|
||||
// var csvReader io.Reader // Some Reader that returns CSV data
|
||||
// ... // Open Reader here
|
||||
// return csvReader
|
||||
// })
|
||||
// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
|
||||
// if err != nil {
|
||||
// ...
|
||||
//
|
||||
func RegisterReaderHandler(name string, handler func() io.Reader) {
|
||||
readerRegisterLock.Lock()
|
||||
// lazy map init
|
||||
if readerRegister == nil {
|
||||
readerRegister = make(map[string]func() io.Reader)
|
||||
}
|
||||
|
||||
readerRegister[name] = handler
|
||||
readerRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
// DeregisterReaderHandler removes the ReaderHandler function with
|
||||
// the given name from the registry.
|
||||
func DeregisterReaderHandler(name string) {
|
||||
readerRegisterLock.Lock()
|
||||
delete(readerRegister, name)
|
||||
readerRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
func deferredClose(err *error, closer io.Closer) {
|
||||
closeErr := closer.Close()
|
||||
if *err == nil {
|
||||
*err = closeErr
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
|
||||
var rdr io.Reader
|
||||
var data []byte
|
||||
packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
|
||||
if mc.maxWriteSize < packetSize {
|
||||
packetSize = mc.maxWriteSize
|
||||
}
|
||||
|
||||
if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
|
||||
// The server might return an absolute path. See issue #355.
|
||||
name = name[idx+8:]
|
||||
|
||||
readerRegisterLock.RLock()
|
||||
handler, inMap := readerRegister[name]
|
||||
readerRegisterLock.RUnlock()
|
||||
|
||||
if inMap {
|
||||
rdr = handler()
|
||||
if rdr != nil {
|
||||
if cl, ok := rdr.(io.Closer); ok {
|
||||
defer deferredClose(&err, cl)
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("Reader '%s' is <nil>", name)
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("Reader '%s' is not registered", name)
|
||||
}
|
||||
} else { // File
|
||||
name = strings.Trim(name, `"`)
|
||||
fileRegisterLock.RLock()
|
||||
fr := fileRegister[name]
|
||||
fileRegisterLock.RUnlock()
|
||||
if mc.cfg.AllowAllFiles || fr {
|
||||
var file *os.File
|
||||
var fi os.FileInfo
|
||||
|
||||
if file, err = os.Open(name); err == nil {
|
||||
defer deferredClose(&err, file)
|
||||
|
||||
// get file size
|
||||
if fi, err = file.Stat(); err == nil {
|
||||
rdr = file
|
||||
if fileSize := int(fi.Size()); fileSize < packetSize {
|
||||
packetSize = fileSize
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("local file '%s' is not registered", name)
|
||||
}
|
||||
}
|
||||
|
||||
// send content packets
|
||||
if err == nil {
|
||||
data := make([]byte, 4+packetSize)
|
||||
var n int
|
||||
for err == nil {
|
||||
n, err = rdr.Read(data[4:])
|
||||
if n > 0 {
|
||||
if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
|
||||
return ioErr
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
|
||||
// send empty packet (termination)
|
||||
if data == nil {
|
||||
data = make([]byte, 4)
|
||||
}
|
||||
if ioErr := mc.writePacket(data[:4]); ioErr != nil {
|
||||
return ioErr
|
||||
}
|
||||
|
||||
// read OK packet
|
||||
if err == nil {
|
||||
return mc.readResultOK()
|
||||
}
|
||||
|
||||
mc.readPacket()
|
||||
return err
|
||||
}
|
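A hedged, self-contained sketch of the `Reader::<name>` path implemented above; the handler name, DSN, and target table are placeholders for illustration only.

```go
package main

import (
	"bytes"
	"database/sql"
	"io"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Serve CSV rows from memory under the name "data"; the statement below
	// refers to them as 'Reader::data'.
	mysql.RegisterReaderHandler("data", func() io.Reader {
		return bytes.NewReader([]byte("1,foo\n2,bar\n"))
	})
	defer mysql.DeregisterReaderHandler("data")

	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo FIELDS TERMINATED BY ','"); err != nil {
		log.Fatal(err)
	}
}
```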
1246 vendor/github.com/go-sql-driver/mysql/packets.go (generated, vendored, new file)
(file diff suppressed because it is too large)
22 vendor/github.com/go-sql-driver/mysql/result.go (generated, vendored, new file)
@@ -0,0 +1,22 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
type mysqlResult struct {
|
||||
affectedRows int64
|
||||
insertId int64
|
||||
}
|
||||
|
||||
func (res *mysqlResult) LastInsertId() (int64, error) {
|
||||
return res.insertId, nil
|
||||
}
|
||||
|
||||
func (res *mysqlResult) RowsAffected() (int64, error) {
|
||||
return res.affectedRows, nil
|
||||
}
|
112 vendor/github.com/go-sql-driver/mysql/rows.go (generated, vendored, new file)
@@ -0,0 +1,112 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"io"
|
||||
)
|
||||
|
||||
type mysqlField struct {
|
||||
tableName string
|
||||
name string
|
||||
flags fieldFlag
|
||||
fieldType byte
|
||||
decimals byte
|
||||
}
|
||||
|
||||
type mysqlRows struct {
|
||||
mc *mysqlConn
|
||||
columns []mysqlField
|
||||
}
|
||||
|
||||
type binaryRows struct {
|
||||
mysqlRows
|
||||
}
|
||||
|
||||
type textRows struct {
|
||||
mysqlRows
|
||||
}
|
||||
|
||||
type emptyRows struct{}
|
||||
|
||||
func (rows *mysqlRows) Columns() []string {
|
||||
columns := make([]string, len(rows.columns))
|
||||
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
|
||||
for i := range columns {
|
||||
if tableName := rows.columns[i].tableName; len(tableName) > 0 {
|
||||
columns[i] = tableName + "." + rows.columns[i].name
|
||||
} else {
|
||||
columns[i] = rows.columns[i].name
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := range columns {
|
||||
columns[i] = rows.columns[i].name
|
||||
}
|
||||
}
|
||||
return columns
|
||||
}
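The aliasing branch above is driven by connection configuration. As a hedged sketch of the caller's view (the `columnsWithAlias` DSN flag is assumed to map to the `ColumnsWithAlias` config field used above; the DSN, tables, and columns are placeholders):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// With columnsWithAlias enabled, the driver prefixes column names with
	// their table alias, which disambiguates joined columns of the same name.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test?columnsWithAlias=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT u.id, o.id FROM users u JOIN orders o ON o.user_id = u.id")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, _ := rows.Columns()
	fmt.Println(cols) // e.g. [u.id o.id] instead of [id id]
}
```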
|
||||
|
||||
func (rows *mysqlRows) Close() error {
|
||||
mc := rows.mc
|
||||
if mc == nil {
|
||||
return nil
|
||||
}
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
|
||||
// Remove unread packets from stream
|
||||
err := mc.readUntilEOF()
|
||||
if err == nil {
|
||||
if err = mc.discardResults(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
rows.mc = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (rows *binaryRows) Next(dest []driver.Value) error {
|
||||
if mc := rows.mc; mc != nil {
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
|
||||
// Fetch next row from stream
|
||||
return rows.readRow(dest)
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (rows *textRows) Next(dest []driver.Value) error {
|
||||
if mc := rows.mc; mc != nil {
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
|
||||
// Fetch next row from stream
|
||||
return rows.readRow(dest)
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (rows emptyRows) Columns() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows emptyRows) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows emptyRows) Next(dest []driver.Value) error {
|
||||
return io.EOF
|
||||
}
|
150 vendor/github.com/go-sql-driver/mysql/statement.go (generated, vendored, new file)
@@ -0,0 +1,150 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type mysqlStmt struct {
|
||||
mc *mysqlConn
|
||||
id uint32
|
||||
paramCount int
|
||||
columns []mysqlField // cached from the first query
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Close() error {
|
||||
if stmt.mc == nil || stmt.mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
|
||||
err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
|
||||
stmt.mc = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) NumInput() int {
|
||||
return stmt.paramCount
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
|
||||
return converter{}
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
|
||||
if stmt.mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := stmt.writeExecutePacket(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mc := stmt.mc
|
||||
|
||||
mc.affectedRows = 0
|
||||
mc.insertId = 0
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil {
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
err = mc.readUntilEOF()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Rows
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
if err == nil {
|
||||
return &mysqlResult{
|
||||
affectedRows: int64(mc.affectedRows),
|
||||
insertId: int64(mc.insertId),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
|
||||
if stmt.mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := stmt.writeExecutePacket(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mc := stmt.mc
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rows := new(binaryRows)
|
||||
|
||||
if resLen > 0 {
|
||||
rows.mc = mc
|
||||
// Columns
|
||||
// If not cached, read them and cache them
|
||||
if stmt.columns == nil {
|
||||
rows.columns, err = mc.readColumns(resLen)
|
||||
stmt.columns = rows.columns
|
||||
} else {
|
||||
rows.columns = stmt.columns
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
}
|
||||
|
||||
return rows, err
|
||||
}
|
||||
|
||||
type converter struct{}
|
||||
|
||||
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
|
||||
if driver.IsValue(v) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(v)
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// indirect pointers
|
||||
if rv.IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
return c.ConvertValue(rv.Elem().Interface())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int(), nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
return int64(rv.Uint()), nil
|
||||
case reflect.Uint64:
|
||||
u64 := rv.Uint()
|
||||
if u64 >= 1<<63 {
|
||||
return strconv.FormatUint(u64, 10), nil
|
||||
}
|
||||
return int64(u64), nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
|
||||
}
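A small, hedged illustration of the `Uint64` branch above: values that overflow int64 are passed onward as decimal strings, while everything else becomes a driver-native integer.

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// Mirrors the converter's Uint64 branch: a value with the top bit set cannot
	// be represented as an int64, so it is formatted as a decimal string instead.
	big := uint64(math.MaxUint64)
	fmt.Println(strconv.FormatUint(big, 10)) // "18446744073709551615"

	small := uint64(42) // fits in int64, passed through as int64(42)
	fmt.Println(int64(small))
}
```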
|
31 vendor/github.com/go-sql-driver/mysql/transaction.go (generated, vendored, new file)
@@ -0,0 +1,31 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
type mysqlTx struct {
|
||||
mc *mysqlConn
|
||||
}
|
||||
|
||||
func (tx *mysqlTx) Commit() (err error) {
|
||||
if tx.mc == nil || tx.mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
err = tx.mc.exec("COMMIT")
|
||||
tx.mc = nil
|
||||
return
|
||||
}
|
||||
|
||||
func (tx *mysqlTx) Rollback() (err error) {
|
||||
if tx.mc == nil || tx.mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
err = tx.mc.exec("ROLLBACK")
|
||||
tx.mc = nil
|
||||
return
|
||||
}
|
740 vendor/github.com/go-sql-driver/mysql/utils.go (generated, vendored, new file)
@@ -0,0 +1,740 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"crypto/tls"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
|
||||
)
|
||||
|
||||
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
|
||||
// Use the key as a value in the DSN where tls=value.
|
||||
//
|
||||
// rootCertPool := x509.NewCertPool()
|
||||
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
|
||||
// if err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
|
||||
// log.Fatal("Failed to append PEM.")
|
||||
// }
|
||||
// clientCert := make([]tls.Certificate, 0, 1)
|
||||
// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
|
||||
// if err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
// clientCert = append(clientCert, certs)
|
||||
// mysql.RegisterTLSConfig("custom", &tls.Config{
|
||||
// RootCAs: rootCertPool,
|
||||
// Certificates: clientCert,
|
||||
// })
|
||||
// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
|
||||
//
|
||||
func RegisterTLSConfig(key string, config *tls.Config) error {
|
||||
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
|
||||
return fmt.Errorf("key '%s' is reserved", key)
|
||||
}
|
||||
|
||||
if tlsConfigRegister == nil {
|
||||
tlsConfigRegister = make(map[string]*tls.Config)
|
||||
}
|
||||
|
||||
tlsConfigRegister[key] = config
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeregisterTLSConfig removes the tls.Config associated with key.
|
||||
func DeregisterTLSConfig(key string) {
|
||||
if tlsConfigRegister != nil {
|
||||
delete(tlsConfigRegister, key)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the bool value of the input.
|
||||
// The 2nd return value indicates if the input was a valid bool value
|
||||
func readBool(input string) (value bool, valid bool) {
|
||||
switch input {
|
||||
case "1", "true", "TRUE", "True":
|
||||
return true, true
|
||||
case "0", "false", "FALSE", "False":
|
||||
return false, true
|
||||
}
|
||||
|
||||
// Not a valid bool value
|
||||
return
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Authentication *
|
||||
******************************************************************************/
|
||||
|
||||
// Encrypt password using 4.1+ method
|
||||
func scramblePassword(scramble, password []byte) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stage1Hash = SHA1(password)
|
||||
crypt := sha1.New()
|
||||
crypt.Write(password)
|
||||
stage1 := crypt.Sum(nil)
|
||||
|
||||
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
|
||||
// inner Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(stage1)
|
||||
hash := crypt.Sum(nil)
|
||||
|
||||
// outer Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(scramble)
|
||||
crypt.Write(hash)
|
||||
scramble = crypt.Sum(nil)
|
||||
|
||||
// token = scrambleHash XOR stage1Hash
|
||||
for i := range scramble {
|
||||
scramble[i] ^= stage1[i]
|
||||
}
|
||||
return scramble
|
||||
}
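To make the comments above concrete, a standalone sketch of the same 4.1+ formula (token = SHA1(scramble + SHA1(SHA1(password))) XOR SHA1(password)); this is an illustration, not the driver's own code path.

```go
package scrambledemo

import "crypto/sha1"

// scramble41 re-derives the 4.1+ auth token purely to illustrate the formula
// documented in the comments above.
func scramble41(scramble, password []byte) []byte {
	if len(password) == 0 {
		return nil
	}
	stage1 := sha1.Sum(password) // SHA1(password)
	inner := sha1.Sum(stage1[:]) // SHA1(SHA1(password))

	h := sha1.New() // SHA1(scramble + SHA1(SHA1(password)))
	h.Write(scramble)
	h.Write(inner[:])
	token := h.Sum(nil)

	for i := range token { // XOR with SHA1(password)
		token[i] ^= stage1[i]
	}
	return token
}
```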
|
||||
|
||||
// Encrypt password using pre 4.1 (old password) method
|
||||
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
|
||||
type myRnd struct {
|
||||
seed1, seed2 uint32
|
||||
}
|
||||
|
||||
const myRndMaxVal = 0x3FFFFFFF
|
||||
|
||||
// Pseudo random number generator
|
||||
func newMyRnd(seed1, seed2 uint32) *myRnd {
|
||||
return &myRnd{
|
||||
seed1: seed1 % myRndMaxVal,
|
||||
seed2: seed2 % myRndMaxVal,
|
||||
}
|
||||
}
|
||||
|
||||
// Tested to be equivalent to MariaDB's floating point variant
|
||||
// http://play.golang.org/p/QHvhd4qved
|
||||
// http://play.golang.org/p/RG0q4ElWDx
|
||||
func (r *myRnd) NextByte() byte {
|
||||
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
|
||||
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
|
||||
|
||||
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
|
||||
}
|
||||
|
||||
// Generate binary hash from byte string using insecure pre 4.1 method
|
||||
func pwHash(password []byte) (result [2]uint32) {
|
||||
var add uint32 = 7
|
||||
var tmp uint32
|
||||
|
||||
result[0] = 1345345333
|
||||
result[1] = 0x12345671
|
||||
|
||||
for _, c := range password {
|
||||
// skip spaces and tabs in password
|
||||
if c == ' ' || c == '\t' {
|
||||
continue
|
||||
}
|
||||
|
||||
tmp = uint32(c)
|
||||
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
|
||||
result[1] += (result[1] << 8) ^ result[0]
|
||||
add += tmp
|
||||
}
|
||||
|
||||
// Remove sign bit ((1<<31)-1)
|
||||
result[0] &= 0x7FFFFFFF
|
||||
result[1] &= 0x7FFFFFFF
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Encrypt password using insecure pre 4.1 method
|
||||
func scrambleOldPassword(scramble, password []byte) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
scramble = scramble[:8]
|
||||
|
||||
hashPw := pwHash(password)
|
||||
hashSc := pwHash(scramble)
|
||||
|
||||
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
|
||||
|
||||
var out [8]byte
|
||||
for i := range out {
|
||||
out[i] = r.NextByte() + 64
|
||||
}
|
||||
|
||||
mask := r.NextByte()
|
||||
for i := range out {
|
||||
out[i] ^= mask
|
||||
}
|
||||
|
||||
return out[:]
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Time related utils *
|
||||
******************************************************************************/
|
||||
|
||||
// NullTime represents a time.Time that may be NULL.
|
||||
// NullTime implements the Scanner interface so
|
||||
// it can be used as a scan destination:
|
||||
//
|
||||
// var nt NullTime
|
||||
// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
|
||||
// ...
|
||||
// if nt.Valid {
|
||||
// // use nt.Time
|
||||
// } else {
|
||||
// // NULL value
|
||||
// }
|
||||
//
|
||||
// This NullTime implementation is not driver-specific
|
||||
type NullTime struct {
|
||||
Time time.Time
|
||||
Valid bool // Valid is true if Time is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the Scanner interface.
|
||||
// The value type must be time.Time or string / []byte (formatted time-string),
|
||||
// otherwise Scan fails.
|
||||
func (nt *NullTime) Scan(value interface{}) (err error) {
|
||||
if value == nil {
|
||||
nt.Time, nt.Valid = time.Time{}, false
|
||||
return
|
||||
}
|
||||
|
||||
switch v := value.(type) {
|
||||
case time.Time:
|
||||
nt.Time, nt.Valid = v, true
|
||||
return
|
||||
case []byte:
|
||||
nt.Time, err = parseDateTime(string(v), time.UTC)
|
||||
nt.Valid = (err == nil)
|
||||
return
|
||||
case string:
|
||||
nt.Time, err = parseDateTime(v, time.UTC)
|
||||
nt.Valid = (err == nil)
|
||||
return
|
||||
}
|
||||
|
||||
nt.Valid = false
|
||||
return fmt.Errorf("Can't convert %T to time.Time", value)
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nt NullTime) Value() (driver.Value, error) {
|
||||
if !nt.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
return nt.Time, nil
|
||||
}
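A hedged usage sketch of `NullTime` from the consumer side; the DSN, table, and column are placeholders, and `parseTime=true` is assumed so DATETIME columns arrive as `time.Time`.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test?parseTime=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Scan a possibly-NULL timestamp column into a NullTime.
	var deletedAt mysql.NullTime
	if err := db.QueryRow("SELECT deleted_at FROM users WHERE id = ?", 1).Scan(&deletedAt); err != nil {
		log.Fatal(err)
	}
	if deletedAt.Valid {
		fmt.Println("deleted at", deletedAt.Time)
	} else {
		fmt.Println("not deleted (NULL)")
	}
}
```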
|
||||
|
||||
func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
|
||||
base := "0000-00-00 00:00:00.0000000"
|
||||
switch len(str) {
|
||||
case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
|
||||
if str == base[:len(str)] {
|
||||
return
|
||||
}
|
||||
t, err = time.Parse(timeFormat[:len(str)], str)
|
||||
default:
|
||||
err = fmt.Errorf("invalid time string: %s", str)
|
||||
return
|
||||
}
|
||||
|
||||
// Adjust location
|
||||
if err == nil && loc != time.UTC {
|
||||
y, mo, d := t.Date()
|
||||
h, mi, s := t.Clock()
|
||||
t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
|
||||
switch num {
|
||||
case 0:
|
||||
return time.Time{}, nil
|
||||
case 4:
|
||||
return time.Date(
|
||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
||||
time.Month(data[2]), // month
|
||||
int(data[3]), // day
|
||||
0, 0, 0, 0,
|
||||
loc,
|
||||
), nil
|
||||
case 7:
|
||||
return time.Date(
|
||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
||||
time.Month(data[2]), // month
|
||||
int(data[3]), // day
|
||||
int(data[4]), // hour
|
||||
int(data[5]), // minutes
|
||||
int(data[6]), // seconds
|
||||
0,
|
||||
loc,
|
||||
), nil
|
||||
case 11:
|
||||
return time.Date(
|
||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
||||
time.Month(data[2]), // month
|
||||
int(data[3]), // day
|
||||
int(data[4]), // hour
|
||||
int(data[5]), // minutes
|
||||
int(data[6]), // seconds
|
||||
int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
|
||||
loc,
|
||||
), nil
|
||||
}
|
||||
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
|
||||
}
|
||||
|
||||
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
|
||||
// if the DATE or DATETIME has the zero value.
|
||||
// It must never be changed.
|
||||
// The current behavior depends on database/sql copying the result.
|
||||
var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
|
||||
|
||||
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
|
||||
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
|
||||
|
||||
func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
|
||||
// length expects the deterministic length of the zero value,
|
||||
// negative time and 100+ hours are automatically added if needed
|
||||
if len(src) == 0 {
|
||||
if justTime {
|
||||
return zeroDateTime[11 : 11+length], nil
|
||||
}
|
||||
return zeroDateTime[:length], nil
|
||||
}
|
||||
var dst []byte // return value
|
||||
var pt, p1, p2, p3 byte // current digit pair
|
||||
var zOffs byte // offset of value in zeroDateTime
|
||||
if justTime {
|
||||
switch length {
|
||||
case
|
||||
8, // time (can be up to 10 when negative and 100+ hours)
|
||||
10, 11, 12, 13, 14, 15: // time with fractional seconds
|
||||
default:
|
||||
return nil, fmt.Errorf("illegal TIME length %d", length)
|
||||
}
|
||||
switch len(src) {
|
||||
case 8, 12:
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
|
||||
}
|
||||
// +2 to enable negative time and 100+ hours
|
||||
dst = make([]byte, 0, length+2)
|
||||
if src[0] == 1 {
|
||||
dst = append(dst, '-')
|
||||
}
|
||||
if src[1] != 0 {
|
||||
hour := uint16(src[1])*24 + uint16(src[5])
|
||||
pt = byte(hour / 100)
|
||||
p1 = byte(hour - 100*uint16(pt))
|
||||
dst = append(dst, digits01[pt])
|
||||
} else {
|
||||
p1 = src[5]
|
||||
}
|
||||
zOffs = 11
|
||||
src = src[6:]
|
||||
} else {
|
||||
switch length {
|
||||
case 10, 19, 21, 22, 23, 24, 25, 26:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
return nil, fmt.Errorf("illegal %s length %d", t, length)
|
||||
}
|
||||
switch len(src) {
|
||||
case 4, 7, 11:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
|
||||
}
|
||||
dst = make([]byte, 0, length)
|
||||
// start with the date
|
||||
year := binary.LittleEndian.Uint16(src[:2])
|
||||
pt = byte(year / 100)
|
||||
p1 = byte(year - 100*uint16(pt))
|
||||
p2, p3 = src[2], src[3]
|
||||
dst = append(dst,
|
||||
digits10[pt], digits01[pt],
|
||||
digits10[p1], digits01[p1], '-',
|
||||
digits10[p2], digits01[p2], '-',
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
if length == 10 {
|
||||
return dst, nil
|
||||
}
|
||||
if len(src) == 4 {
|
||||
return append(dst, zeroDateTime[10:length]...), nil
|
||||
}
|
||||
dst = append(dst, ' ')
|
||||
p1 = src[4] // hour
|
||||
src = src[5:]
|
||||
}
|
||||
// p1 is 2-digit hour, src is after hour
|
||||
p2, p3 = src[0], src[1]
|
||||
dst = append(dst,
|
||||
digits10[p1], digits01[p1], ':',
|
||||
digits10[p2], digits01[p2], ':',
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
if length <= byte(len(dst)) {
|
||||
return dst, nil
|
||||
}
|
||||
src = src[2:]
|
||||
if len(src) == 0 {
|
||||
return append(dst, zeroDateTime[19:zOffs+length]...), nil
|
||||
}
|
||||
microsecs := binary.LittleEndian.Uint32(src[:4])
|
||||
p1 = byte(microsecs / 10000)
|
||||
microsecs -= 10000 * uint32(p1)
|
||||
p2 = byte(microsecs / 100)
|
||||
microsecs -= 100 * uint32(p2)
|
||||
p3 = byte(microsecs)
|
||||
switch decimals := zOffs + length - 20; decimals {
|
||||
default:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3], digits01[p3],
|
||||
), nil
|
||||
case 1:
|
||||
return append(dst, '.',
|
||||
digits10[p1],
|
||||
), nil
|
||||
case 2:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
), nil
|
||||
case 3:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2],
|
||||
), nil
|
||||
case 4:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
), nil
|
||||
case 5:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3],
|
||||
), nil
|
||||
}
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Convert from and to bytes *
|
||||
******************************************************************************/
|
||||
|
||||
func uint64ToBytes(n uint64) []byte {
|
||||
return []byte{
|
||||
byte(n),
|
||||
byte(n >> 8),
|
||||
byte(n >> 16),
|
||||
byte(n >> 24),
|
||||
byte(n >> 32),
|
||||
byte(n >> 40),
|
||||
byte(n >> 48),
|
||||
byte(n >> 56),
|
||||
}
|
||||
}
|
||||
|
||||
func uint64ToString(n uint64) []byte {
|
||||
var a [20]byte
|
||||
i := 20
|
||||
|
||||
// U+0030 = 0
|
||||
// ...
|
||||
// U+0039 = 9
|
||||
|
||||
var q uint64
|
||||
for n >= 10 {
|
||||
i--
|
||||
q = n / 10
|
||||
a[i] = uint8(n-q*10) + 0x30
|
||||
n = q
|
||||
}
|
||||
|
||||
i--
|
||||
a[i] = uint8(n) + 0x30
|
||||
|
||||
return a[i:]
|
||||
}
|
||||
|
||||
// treats string value as unsigned integer representation
|
||||
func stringToInt(b []byte) int {
|
||||
val := 0
|
||||
for i := range b {
|
||||
val *= 10
|
||||
val += int(b[i] - 0x30)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// returns the string read as a bytes slice, whether the value is NULL,
|
||||
// the number of bytes read and an error, in case the string is longer than
|
||||
// the input slice
|
||||
func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
|
||||
// Get length
|
||||
num, isNull, n := readLengthEncodedInteger(b)
|
||||
if num < 1 {
|
||||
return b[n:n], isNull, n, nil
|
||||
}
|
||||
|
||||
n += int(num)
|
||||
|
||||
// Check data length
|
||||
if len(b) >= n {
|
||||
return b[n-int(num) : n], false, n, nil
|
||||
}
|
||||
return nil, false, n, io.EOF
|
||||
}
|
||||
|
||||
// returns the number of bytes skipped and an error, in case the string is
|
||||
// longer than the input slice
|
||||
func skipLengthEncodedString(b []byte) (int, error) {
|
||||
// Get length
|
||||
num, _, n := readLengthEncodedInteger(b)
|
||||
if num < 1 {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
n += int(num)
|
||||
|
||||
// Check data length
|
||||
if len(b) >= n {
|
||||
return n, nil
|
||||
}
|
||||
return n, io.EOF
|
||||
}
|
||||
|
||||
// returns the number read, whether the value is NULL and the number of bytes read
|
||||
func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
|
||||
// See issue #349
|
||||
if len(b) == 0 {
|
||||
return 0, true, 1
|
||||
}
|
||||
switch b[0] {
|
||||
|
||||
// 251: NULL
|
||||
case 0xfb:
|
||||
return 0, true, 1
|
||||
|
||||
// 252: value of following 2
|
||||
case 0xfc:
|
||||
return uint64(b[1]) | uint64(b[2])<<8, false, 3
|
||||
|
||||
// 253: value of following 3
|
||||
case 0xfd:
|
||||
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
|
||||
|
||||
// 254: value of following 8
|
||||
case 0xfe:
|
||||
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
|
||||
uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
|
||||
uint64(b[7])<<48 | uint64(b[8])<<56,
|
||||
false, 9
|
||||
}
|
||||
|
||||
// 0-250: value of first byte
|
||||
return uint64(b[0]), false, 1
|
||||
}
|
||||
|
||||
// encodes a uint64 value and appends it to the given bytes slice
|
||||
func appendLengthEncodedInteger(b []byte, n uint64) []byte {
|
||||
switch {
|
||||
case n <= 250:
|
||||
return append(b, byte(n))
|
||||
|
||||
case n <= 0xffff:
|
||||
return append(b, 0xfc, byte(n), byte(n>>8))
|
||||
|
||||
case n <= 0xffffff:
|
||||
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
|
||||
}
|
||||
return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
|
||||
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
|
||||
}
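A few worked examples of the wire format implemented by the two functions above, re-expressed standalone so the byte layout is easy to see (this mirrors the cases exercised in utils_test.go further down).

```go
package main

import "fmt"

// encodeLenEncInt mirrors appendLengthEncodedInteger above, shown standalone
// so the length-encoded integer layout is visible at a glance.
func encodeLenEncInt(n uint64) []byte {
	switch {
	case n <= 250: // values 0..250 encode as themselves
		return []byte{byte(n)}
	case n <= 0xffff: // 0xfc + 2-byte little-endian payload
		return []byte{0xfc, byte(n), byte(n >> 8)}
	case n <= 0xffffff: // 0xfd + 3-byte payload
		return []byte{0xfd, byte(n), byte(n >> 8), byte(n >> 16)}
	}
	// 0xfe + 8-byte payload; 0xfb on its own signals NULL
	return []byte{0xfe, byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24),
		byte(n >> 32), byte(n >> 40), byte(n >> 48), byte(n >> 56)}
}

func main() {
	fmt.Printf("% x\n", encodeLenEncInt(0xfa))     // fa
	fmt.Printf("% x\n", encodeLenEncInt(0x1234))   // fc 34 12
	fmt.Printf("% x\n", encodeLenEncInt(0x123456)) // fd 56 34 12
}
```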
|
||||
|
||||
// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
|
||||
// If cap(buf) is not enough, reallocate new buffer.
|
||||
func reserveBuffer(buf []byte, appendSize int) []byte {
|
||||
newSize := len(buf) + appendSize
|
||||
if cap(buf) < newSize {
|
||||
// Grow buffer exponentially
|
||||
newBuf := make([]byte, len(buf)*2+appendSize)
|
||||
copy(newBuf, buf)
|
||||
buf = newBuf
|
||||
}
|
||||
return buf[:newSize]
|
||||
}
|
||||
|
||||
// escapeBytesBackslash escapes []byte with backslashes (\)
|
||||
// This escapes the contents of a string (provided as []byte) by adding backslashes before special
|
||||
// characters, and turning others into specific escape sequences, such as
|
||||
// turning newlines into \n and null bytes into \0.
|
||||
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
|
||||
func escapeBytesBackslash(buf, v []byte) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for _, c := range v {
|
||||
switch c {
|
||||
case '\x00':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '0'
|
||||
pos += 2
|
||||
case '\n':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'n'
|
||||
pos += 2
|
||||
case '\r':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'r'
|
||||
pos += 2
|
||||
case '\x1a':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'Z'
|
||||
pos += 2
|
||||
case '\'':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
case '"':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '"'
|
||||
pos += 2
|
||||
case '\\':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\\'
|
||||
pos += 2
|
||||
default:
|
||||
buf[pos] = c
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
// escapeStringBackslash is similar to escapeBytesBackslash but for string.
|
||||
func escapeStringBackslash(buf []byte, v string) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for i := 0; i < len(v); i++ {
|
||||
c := v[i]
|
||||
switch c {
|
||||
case '\x00':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '0'
|
||||
pos += 2
|
||||
case '\n':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'n'
|
||||
pos += 2
|
||||
case '\r':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'r'
|
||||
pos += 2
|
||||
case '\x1a':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'Z'
|
||||
pos += 2
|
||||
case '\'':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
case '"':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '"'
|
||||
pos += 2
|
||||
case '\\':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\\'
|
||||
pos += 2
|
||||
default:
|
||||
buf[pos] = c
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
|
||||
// This escapes the contents of a string by doubling up any apostrophes that
|
||||
// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
|
||||
// effect on the server.
|
||||
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
|
||||
func escapeBytesQuotes(buf, v []byte) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for _, c := range v {
|
||||
if c == '\'' {
|
||||
buf[pos] = '\''
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
} else {
|
||||
buf[pos] = c
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
// escapeStringQuotes is similar to escapeBytesQuotes but for string.
|
||||
func escapeStringQuotes(buf []byte, v string) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for i := 0; i < len(v); i++ {
|
||||
c := v[i]
|
||||
if c == '\'' {
|
||||
buf[pos] = '\''
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
} else {
|
||||
buf[pos] = c
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
197 vendor/github.com/go-sql-driver/mysql/utils_test.go (generated, vendored, new file)
@@ -0,0 +1,197 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestScanNullTime(t *testing.T) {
|
||||
var scanTests = []struct {
|
||||
in interface{}
|
||||
error bool
|
||||
valid bool
|
||||
time time.Time
|
||||
}{
|
||||
{tDate, false, true, tDate},
|
||||
{sDate, false, true, tDate},
|
||||
{[]byte(sDate), false, true, tDate},
|
||||
{tDateTime, false, true, tDateTime},
|
||||
{sDateTime, false, true, tDateTime},
|
||||
{[]byte(sDateTime), false, true, tDateTime},
|
||||
{tDate0, false, true, tDate0},
|
||||
{sDate0, false, true, tDate0},
|
||||
{[]byte(sDate0), false, true, tDate0},
|
||||
{sDateTime0, false, true, tDate0},
|
||||
{[]byte(sDateTime0), false, true, tDate0},
|
||||
{"", true, false, tDate0},
|
||||
{"1234", true, false, tDate0},
|
||||
{0, true, false, tDate0},
|
||||
}
|
||||
|
||||
var nt = NullTime{}
|
||||
var err error
|
||||
|
||||
for _, tst := range scanTests {
|
||||
err = nt.Scan(tst.in)
|
||||
if (err != nil) != tst.error {
|
||||
t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
|
||||
}
|
||||
if nt.Valid != tst.valid {
|
||||
t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
|
||||
}
|
||||
if nt.Time != tst.time {
|
||||
t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLengthEncodedInteger(t *testing.T) {
|
||||
var integerTests = []struct {
|
||||
num uint64
|
||||
encoded []byte
|
||||
}{
|
||||
{0x0000000000000000, []byte{0x00}},
|
||||
{0x0000000000000012, []byte{0x12}},
|
||||
{0x00000000000000fa, []byte{0xfa}},
|
||||
{0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
|
||||
{0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
|
||||
{0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
|
||||
{0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
|
||||
{0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
|
||||
{0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
|
||||
{0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
|
||||
{0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
|
||||
{0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
|
||||
}
|
||||
|
||||
for _, tst := range integerTests {
|
||||
num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
|
||||
if isNull {
|
||||
t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
|
||||
}
|
||||
if num != tst.num {
|
||||
t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
|
||||
}
|
||||
if numLen != len(tst.encoded) {
|
||||
t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
|
||||
}
|
||||
encoded := appendLengthEncodedInteger(nil, num)
|
||||
if !bytes.Equal(encoded, tst.encoded) {
|
||||
t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOldPass(t *testing.T) {
|
||||
scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
|
||||
vectors := []struct {
|
||||
pass string
|
||||
out string
|
||||
}{
|
||||
{" pass", "47575c5a435b4251"},
|
||||
{"pass ", "47575c5a435b4251"},
|
||||
{"123\t456", "575c47505b5b5559"},
|
||||
{"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
|
||||
}
|
||||
for _, tuple := range vectors {
|
||||
ours := scrambleOldPassword(scramble, []byte(tuple.pass))
|
||||
if tuple.out != fmt.Sprintf("%x", ours) {
|
||||
t.Errorf("Failed old password %q", tuple.pass)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatBinaryDateTime(t *testing.T) {
|
||||
rawDate := [11]byte{}
|
||||
binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
|
||||
rawDate[2] = 12 // months
|
||||
rawDate[3] = 30 // days
|
||||
rawDate[4] = 15 // hours
|
||||
rawDate[5] = 46 // minutes
|
||||
rawDate[6] = 23 // seconds
|
||||
binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
|
||||
expect := func(expected string, inlen, outlen uint8) {
|
||||
actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false)
|
||||
bytes, ok := actual.([]byte)
|
||||
if !ok {
|
||||
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
|
||||
}
|
||||
if string(bytes) != expected {
|
||||
t.Errorf(
|
||||
"expected %q, got %q for length in %d, out %d",
|
||||
bytes, actual, inlen, outlen,
|
||||
)
|
||||
}
|
||||
}
|
||||
expect("0000-00-00", 0, 10)
|
||||
expect("0000-00-00 00:00:00", 0, 19)
|
||||
expect("1978-12-30", 4, 10)
|
||||
expect("1978-12-30 15:46:23", 7, 19)
|
||||
expect("1978-12-30 15:46:23.987654", 11, 26)
|
||||
}
|
||||
|
||||
func TestEscapeBackslash(t *testing.T) {
|
||||
expect := func(expected, value string) {
|
||||
actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
|
||||
actual = string(escapeStringBackslash([]byte{}, value))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
expect("foo\\0bar", "foo\x00bar")
|
||||
expect("foo\\nbar", "foo\nbar")
|
||||
expect("foo\\rbar", "foo\rbar")
|
||||
expect("foo\\Zbar", "foo\x1abar")
|
||||
expect("foo\\\"bar", "foo\"bar")
|
||||
expect("foo\\\\bar", "foo\\bar")
|
||||
expect("foo\\'bar", "foo'bar")
|
||||
}
|
||||
|
||||
func TestEscapeQuotes(t *testing.T) {
|
||||
expect := func(expected, value string) {
|
||||
actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
|
||||
actual = string(escapeStringQuotes([]byte{}, value))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
expect("foo\x00bar", "foo\x00bar") // not affected
|
||||
expect("foo\nbar", "foo\nbar") // not affected
|
||||
expect("foo\rbar", "foo\rbar") // not affected
|
||||
expect("foo\x1abar", "foo\x1abar") // not affected
|
||||
expect("foo''bar", "foo'bar") // affected
|
||||
expect("foo\"bar", "foo\"bar") // not affected
|
||||
}
|
4 vendor/github.com/lib/pq/.gitignore (generated, vendored, new file)
@@ -0,0 +1,4 @@
.db
|
||||
*.test
|
||||
*~
|
||||
*.swp
|
73 vendor/github.com/lib/pq/.travis.sh (generated, vendored, new executable file)
@@ -0,0 +1,73 @@
#!/bin/bash
|
||||
|
||||
set -eu
|
||||
|
||||
client_configure() {
|
||||
sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
|
||||
}
|
||||
|
||||
pgdg_repository() {
|
||||
local sourcelist='sources.list.d/postgresql.list'
|
||||
|
||||
curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
|
||||
echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
|
||||
sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
|
||||
}
|
||||
|
||||
postgresql_configure() {
|
||||
sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
|
||||
local all all trust
|
||||
hostnossl all pqgossltest 127.0.0.1/32 reject
|
||||
hostnossl all pqgosslcert 127.0.0.1/32 reject
|
||||
hostssl all pqgossltest 127.0.0.1/32 trust
|
||||
hostssl all pqgosslcert 127.0.0.1/32 cert
|
||||
host all all 127.0.0.1/32 trust
|
||||
hostnossl all pqgossltest ::1/128 reject
|
||||
hostnossl all pqgosslcert ::1/128 reject
|
||||
hostssl all pqgossltest ::1/128 trust
|
||||
hostssl all pqgosslcert ::1/128 cert
|
||||
host all all ::1/128 trust
|
||||
config
|
||||
|
||||
xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
|
||||
certs/root.crt
|
||||
certs/server.crt
|
||||
certs/server.key
|
||||
certificates
|
||||
|
||||
sort -VCu <<-versions ||
|
||||
$PGVERSION
|
||||
9.2
|
||||
versions
|
||||
sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
|
||||
ssl_ca_file = 'root.crt'
|
||||
ssl_cert_file = 'server.crt'
|
||||
ssl_key_file = 'server.key'
|
||||
config
|
||||
|
||||
echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null
|
||||
|
||||
sudo service postgresql restart
|
||||
}
|
||||
|
||||
postgresql_install() {
|
||||
xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
|
||||
postgresql-$PGVERSION
|
||||
postgresql-server-dev-$PGVERSION
|
||||
postgresql-contrib-$PGVERSION
|
||||
packages
|
||||
}
|
||||
|
||||
postgresql_uninstall() {
|
||||
sudo service postgresql stop
|
||||
xargs sudo apt-get -y --purge remove <<-packages
|
||||
libpq-dev
|
||||
libpq5
|
||||
postgresql
|
||||
postgresql-client-common
|
||||
postgresql-common
|
||||
packages
|
||||
sudo rm -rf /var/lib/postgresql
|
||||
}
|
||||
|
||||
$1
|
43 vendor/github.com/lib/pq/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,43 @@
language: go
|
||||
|
||||
go:
|
||||
- 1.5
|
||||
- 1.6
|
||||
- 1.7
|
||||
- tip
|
||||
|
||||
sudo: true
|
||||
|
||||
env:
|
||||
global:
|
||||
- PGUSER=postgres
|
||||
- PQGOSSLTESTS=1
|
||||
- PQSSLCERTTEST_PATH=$PWD/certs
|
||||
- PGHOST=127.0.0.1
|
||||
matrix:
|
||||
- PGVERSION=9.5
|
||||
- PGVERSION=9.4
|
||||
- PGVERSION=9.3
|
||||
- PGVERSION=9.2
|
||||
- PGVERSION=9.1
|
||||
- PGVERSION=9.0
|
||||
|
||||
before_install:
|
||||
- ./.travis.sh postgresql_uninstall
|
||||
- ./.travis.sh pgdg_repository
|
||||
- ./.travis.sh postgresql_install
|
||||
- ./.travis.sh postgresql_configure
|
||||
- ./.travis.sh client_configure
|
||||
- go get golang.org/x/tools/cmd/goimports
|
||||
|
||||
before_script:
|
||||
- createdb pqgotest
|
||||
- createuser -DRS pqgossltest
|
||||
- createuser -DRS pqgosslcert
|
||||
|
||||
script:
|
||||
- >
|
||||
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
|
||||
- go vet ./...
|
||||
- PQTEST_BINARY_PARAMETERS=no go test -v ./...
|
||||
- PQTEST_BINARY_PARAMETERS=yes go test -v ./...
|
29 vendor/github.com/lib/pq/CONTRIBUTING.md (generated, vendored, new file)
@@ -0,0 +1,29 @@
## Contributing to pq
|
||||
|
||||
`pq` has a backlog of pull requests, but contributions are still very
|
||||
much welcome. You can help with patch review, submitting bug reports,
|
||||
or adding new functionality. There is no formal style guide, but
|
||||
please conform to the style of existing code and general Go formatting
|
||||
conventions when submitting patches.
|
||||
|
||||
### Patch review
|
||||
|
||||
Help review existing open pull requests by commenting on the code or
|
||||
proposed functionality.
|
||||
|
||||
### Bug reports
|
||||
|
||||
We appreciate any bug reports, but especially ones with self-contained
|
||||
(doesn't depend on code outside of pq), minimal (can't be simplified
|
||||
further) test cases. It's especially helpful if you can submit a pull
|
||||
request with just the failing test case (you'll probably want to
|
||||
pattern it after the tests in
|
||||
[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
|
||||
|
||||
### New functionality
|
||||
|
||||
There are a number of pending patches for new functionality, so
|
||||
additional feature patches will take a while to merge. Still, patches
|
||||
are generally reviewed based on usefulness and complexity in addition
|
||||
to time-in-queue, so if you have a knockout idea, take a shot. Feel
|
||||
free to open an issue discussing your proposed patch beforehand.
|
8 vendor/github.com/lib/pq/LICENSE.md (generated, vendored, new file)
@@ -0,0 +1,8 @@
Copyright (c) 2011-2013, 'pq' Contributors
|
||||
Portions Copyright (C) 2011 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
105 vendor/github.com/lib/pq/README.md (generated, vendored, new file)
@@ -0,0 +1,105 @@
# pq - A pure Go postgres driver for Go's database/sql package
|
||||
|
||||
[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq)
|
||||
|
||||
## Install
|
||||
|
||||
go get github.com/lib/pq
|
||||
|
||||
## Docs
|
||||
|
||||
For detailed documentation and basic usage examples, please see the package
|
||||
documentation at <http://godoc.org/github.com/lib/pq>.
|
||||
|
||||
## Tests
|
||||
|
||||
`go test` is used for testing. A running PostgreSQL server is
|
||||
required, with the ability to log in. The default database to connect
|
||||
to test with is "pqgotest," but it can be overridden using environment
|
||||
variables.
|
||||
|
||||
Example:
|
||||
|
||||
PGHOST=/run/postgresql go test github.com/lib/pq
|
||||
|
||||
Optionally, a benchmark suite can be run as part of the tests:
|
||||
|
||||
PGHOST=/run/postgresql go test -bench .
|
||||
|
||||
## Features
|
||||
|
||||
* SSL
|
||||
* Handles bad connections for `database/sql`
|
||||
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
|
||||
* Scan binary blobs correctly (i.e. `bytea`)
|
||||
* Package for `hstore` support
|
||||
* COPY FROM support
|
||||
* pq.ParseURL for converting urls to connection strings for sql.Open.
|
||||
* Many libpq compatible environment variables
|
||||
* Unix socket support
|
||||
* Notifications: `LISTEN`/`NOTIFY`
|
||||
* pgpass support
|
||||
|
||||
## Future / Things you can help with
|
||||
|
||||
* Better COPY FROM / COPY TO (see discussion in #181)
|
||||
|
||||
## Thank you (alphabetical)
|
||||
|
||||
Some of these contributors are from the original library `bmizerany/pq.go` whose
|
||||
code still exists in here.
|
||||
|
||||
* Andy Balholm (andybalholm)
|
||||
* Ben Berkert (benburkert)
|
||||
* Benjamin Heatwole (bheatwole)
|
||||
* Bill Mill (llimllib)
|
||||
* Bjørn Madsen (aeons)
|
||||
* Blake Gentry (bgentry)
|
||||
* Brad Fitzpatrick (bradfitz)
|
||||
* Charlie Melbye (cmelbye)
|
||||
* Chris Bandy (cbandy)
|
||||
* Chris Gilling (cgilling)
|
||||
* Chris Walsh (cwds)
|
||||
* Dan Sosedoff (sosedoff)
|
||||
* Daniel Farina (fdr)
|
||||
* Eric Chlebek (echlebek)
|
||||
* Eric Garrido (minusnine)
|
||||
* Eric Urban (hydrogen18)
|
||||
* Everyone at The Go Team
|
||||
* Evan Shaw (edsrzf)
|
||||
* Ewan Chou (coocood)
|
||||
* Fazal Majid (fazalmajid)
|
||||
* Federico Romero (federomero)
|
||||
* Fumin (fumin)
|
||||
* Gary Burd (garyburd)
|
||||
* Heroku (heroku)
|
||||
* James Pozdena (jpoz)
|
||||
* Jason McVetta (jmcvetta)
|
||||
* Jeremy Jay (pbnjay)
|
||||
* Joakim Sernbrant (serbaut)
|
||||
* John Gallagher (jgallagher)
|
||||
* Jonathan Rudenberg (titanous)
|
||||
* Joël Stemmer (jstemmer)
|
||||
* Kamil Kisiel (kisielk)
|
||||
* Kelly Dunn (kellydunn)
|
||||
* Keith Rarick (kr)
|
||||
* Kir Shatrov (kirs)
|
||||
* Lann Martin (lann)
|
||||
* Maciek Sakrejda (uhoh-itsmaciek)
|
||||
* Marc Brinkmann (mbr)
|
||||
* Marko Tiikkaja (johto)
|
||||
* Matt Newberry (MattNewberry)
|
||||
* Matt Robenolt (mattrobenolt)
|
||||
* Martin Olsen (martinolsen)
|
||||
* Mike Lewis (mikelikespie)
|
||||
* Nicolas Patry (Narsil)
|
||||
* Oliver Tonnhofer (olt)
|
||||
* Patrick Hayes (phayes)
|
||||
* Paul Hammond (paulhammond)
|
||||
* Ryan Smith (ryandotsmith)
|
||||
* Samuel Stauffer (samuel)
|
||||
* Timothée Peignier (cyberdelia)
|
||||
* Travis Cline (tmc)
|
||||
* TruongSinh Tran-Nguyen (truongsinh)
|
||||
* Yaismel Miranda (ympons)
|
||||
* notedit (notedit)
|
727 vendor/github.com/lib/pq/array.go (generated, vendored, new file)
@@ -0,0 +1,727 @@
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var typeByteSlice = reflect.TypeOf([]byte{})
|
||||
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
|
||||
var typeSqlScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
|
||||
|
||||
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
|
||||
// slice of any dimension.
|
||||
//
|
||||
// For example:
|
||||
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
|
||||
//
|
||||
// var x []sql.NullInt64
|
||||
// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x))
|
||||
//
|
||||
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
|
||||
// bound is not one (such as `[0:0]={1}') are not supported.
|
||||
func Array(a interface{}) interface {
|
||||
driver.Valuer
|
||||
sql.Scanner
|
||||
} {
|
||||
switch a := a.(type) {
|
||||
case []bool:
|
||||
return (*BoolArray)(&a)
|
||||
case []float64:
|
||||
return (*Float64Array)(&a)
|
||||
case []int64:
|
||||
return (*Int64Array)(&a)
|
||||
case []string:
|
||||
return (*StringArray)(&a)
|
||||
|
||||
case *[]bool:
|
||||
return (*BoolArray)(a)
|
||||
case *[]float64:
|
||||
return (*Float64Array)(a)
|
||||
case *[]int64:
|
||||
return (*Int64Array)(a)
|
||||
case *[]string:
|
||||
return (*StringArray)(a)
|
||||
}
|
||||
|
||||
return GenericArray{a}
|
||||
}
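A hedged, self-contained sketch of `Array` in use, along the lines of the doc comment above; the connection string, table, and column names are placeholders.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:password@localhost/test?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pass a slice as an array parameter...
	rows, err := db.Query(`SELECT name FROM users WHERE id = ANY($1)`, pq.Array([]int64{235, 401}))
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// ...and scan an array column back into a slice.
	var tags []string
	if err := db.QueryRow(`SELECT ARRAY['go','sql']`).Scan(pq.Array(&tags)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags)
}
```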
|
||||
|
||||
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
|
||||
// to override the array delimiter used by GenericArray.
|
||||
type ArrayDelimiter interface {
|
||||
// ArrayDelimiter returns the delimiter character(s) for this element's type.
|
||||
ArrayDelimiter() string
|
||||
}
|
||||
|
||||
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
|
||||
type BoolArray []bool
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *BoolArray) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
|
||||
}
|
||||
|
||||
func (a *BoolArray) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(BoolArray, len(elems))
|
||||
for i, v := range elems {
|
||||
if len(v) != 1 {
|
||||
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
|
||||
}
|
||||
switch v[0] {
|
||||
case 't':
|
||||
b[i] = true
|
||||
case 'f':
|
||||
b[i] = false
|
||||
default:
|
||||
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a BoolArray) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be exactly two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1+2*n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
b[2*i] = ','
|
||||
if a[i] {
|
||||
b[1+2*i] = 't'
|
||||
} else {
|
||||
b[1+2*i] = 'f'
|
||||
}
|
||||
}
|
||||
|
||||
b[0] = '{'
|
||||
b[2*n] = '}'
|
||||
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
|
||||
type ByteaArray [][]byte
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *ByteaArray) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
|
||||
}
|
||||
|
||||
func (a *ByteaArray) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(ByteaArray, len(elems))
|
||||
for i, v := range elems {
|
||||
b[i], err = parseBytea(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface. It uses the "hex" format which
|
||||
// is only supported on PostgreSQL 9.0 or newer.
|
||||
func (a ByteaArray) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, 2*N bytes of quotes,
|
||||
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
|
||||
size := 1 + 6*n
|
||||
for _, x := range a {
|
||||
size += hex.EncodedLen(len(x))
|
||||
}
|
||||
|
||||
b := make([]byte, size)
|
||||
|
||||
for i, s := 0, b; i < n; i++ {
|
||||
o := copy(s, `,"\\x`)
|
||||
o += hex.Encode(s[o:], a[i])
|
||||
s[o] = '"'
|
||||
s = s[o+1:]
|
||||
}
|
||||
|
||||
b[0] = '{'
|
||||
b[size-1] = '}'
|
||||
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// Float64Array represents a one-dimensional array of the PostgreSQL double
|
||||
// precision type.
|
||||
type Float64Array []float64
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *Float64Array) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
|
||||
}
|
||||
|
||||
func (a *Float64Array) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(Float64Array, len(elems))
|
||||
for i, v := range elems {
|
||||
if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a Float64Array) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+2*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
|
||||
// an array or slice of any dimension.
|
||||
type GenericArray struct{ A interface{} }
|
||||
|
||||
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
|
||||
var assign func([]byte, reflect.Value) error
|
||||
var del = ","
|
||||
|
||||
// TODO calculate the assign function for other types
|
||||
// TODO repeat this section on the element type of arrays or slices (multidimensional)
|
||||
{
|
||||
if reflect.PtrTo(rt).Implements(typeSqlScanner) {
|
||||
// dest is always addressable because it is an element of a slice.
|
||||
assign = func(src []byte, dest reflect.Value) (err error) {
|
||||
ss := dest.Addr().Interface().(sql.Scanner)
|
||||
if src == nil {
|
||||
err = ss.Scan(nil)
|
||||
} else {
|
||||
err = ss.Scan(src)
|
||||
}
|
||||
return
|
||||
}
|
||||
goto FoundType
|
||||
}
|
||||
|
||||
assign = func([]byte, reflect.Value) error {
|
||||
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
|
||||
}
|
||||
}
|
||||
|
||||
FoundType:
|
||||
|
||||
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
|
||||
del = ad.ArrayDelimiter()
|
||||
}
|
||||
|
||||
return rt, assign, del
|
||||
}
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a GenericArray) Scan(src interface{}) error {
|
||||
dpv := reflect.ValueOf(a.A)
|
||||
switch {
|
||||
case dpv.Kind() != reflect.Ptr:
|
||||
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
|
||||
case dpv.IsNil():
|
||||
return fmt.Errorf("pq: destination %T is nil", a.A)
|
||||
}
|
||||
|
||||
dv := dpv.Elem()
|
||||
switch dv.Kind() {
|
||||
case reflect.Slice:
|
||||
case reflect.Array:
|
||||
default:
|
||||
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
|
||||
}
|
||||
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src, dv)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src), dv)
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
|
||||
}
|
||||
|
||||
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
|
||||
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
|
||||
dims, elems, err := parseArray(src, []byte(del))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO allow multidimensional
|
||||
|
||||
if len(dims) > 1 {
|
||||
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
|
||||
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
|
||||
}
|
||||
|
||||
// Treat a zero-dimensional array like an array with a single dimension of zero.
|
||||
if len(dims) == 0 {
|
||||
dims = append(dims, 0)
|
||||
}
|
||||
|
||||
for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
|
||||
switch rt.Kind() {
|
||||
case reflect.Slice:
|
||||
case reflect.Array:
|
||||
if rt.Len() != dims[i] {
|
||||
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
|
||||
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
|
||||
}
|
||||
default:
|
||||
// TODO handle multidimensional
|
||||
}
|
||||
}
|
||||
|
||||
values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
|
||||
for i, e := range elems {
|
||||
if err := assign(e, values.Index(i)); err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO handle multidimensional
|
||||
|
||||
switch dv.Kind() {
|
||||
case reflect.Slice:
|
||||
dv.Set(values.Slice(0, dims[0]))
|
||||
case reflect.Array:
|
||||
for i := 0; i < dims[0]; i++ {
|
||||
dv.Index(i).Set(values.Index(i))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a GenericArray) Value() (driver.Value, error) {
|
||||
if a.A == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(a.A)
|
||||
|
||||
if k := rv.Kind(); k != reflect.Array && k != reflect.Slice {
|
||||
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
|
||||
}
|
||||
|
||||
if n := rv.Len(); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 0, 1+2*n)
|
||||
|
||||
b, _, err := appendArray(b, rv, n)
|
||||
return string(b), err
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
|
||||
type Int64Array []int64
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *Int64Array) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
|
||||
}
|
||||
|
||||
func (a *Int64Array) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(Int64Array, len(elems))
|
||||
for i, v := range elems {
|
||||
if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a Int64Array) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+2*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = strconv.AppendInt(b, a[0], 10)
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = strconv.AppendInt(b, a[i], 10)
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// StringArray represents a one-dimensional array of the PostgreSQL character types.
|
||||
type StringArray []string
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *StringArray) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
|
||||
}
|
||||
|
||||
func (a *StringArray) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(StringArray, len(elems))
|
||||
for i, v := range elems {
|
||||
if b[i] = string(v); v == nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a StringArray) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, 2*N bytes of quotes,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+3*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = appendArrayQuotedBytes(b, []byte(a[0]))
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = appendArrayQuotedBytes(b, []byte(a[i]))
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// appendArray appends rv to the buffer, returning the extended buffer and
|
||||
// the delimiter used between elements.
|
||||
//
|
||||
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
|
||||
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
|
||||
var del string
|
||||
var err error
|
||||
|
||||
b = append(b, '{')
|
||||
|
||||
if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
|
||||
return b, del, err
|
||||
}
|
||||
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, del...)
|
||||
if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
|
||||
return b, del, err
|
||||
}
|
||||
}
|
||||
|
||||
return append(b, '}'), del, nil
|
||||
}
|
||||
|
||||
// appendArrayElement appends rv to the buffer, returning the extended buffer
|
||||
// and the delimiter to use before the next element.
|
||||
//
|
||||
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
|
||||
// using driver.DefaultParameterConverter and the resulting []byte or string
|
||||
// is double-quoted.
|
||||
//
|
||||
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
|
||||
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
|
||||
if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
|
||||
if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
|
||||
if n := rv.Len(); n > 0 {
|
||||
return appendArray(b, rv, n)
|
||||
}
|
||||
|
||||
return b, "", nil
|
||||
}
|
||||
}
|
||||
|
||||
var del string = ","
|
||||
var err error
|
||||
var iv interface{} = rv.Interface()
|
||||
|
||||
if ad, ok := iv.(ArrayDelimiter); ok {
|
||||
del = ad.ArrayDelimiter()
|
||||
}
|
||||
|
||||
if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
|
||||
return b, del, err
|
||||
}
|
||||
|
||||
switch v := iv.(type) {
|
||||
case nil:
|
||||
return append(b, "NULL"...), del, nil
|
||||
case []byte:
|
||||
return appendArrayQuotedBytes(b, v), del, nil
|
||||
case string:
|
||||
return appendArrayQuotedBytes(b, []byte(v)), del, nil
|
||||
}
|
||||
|
||||
b, err = appendValue(b, iv)
|
||||
return b, del, err
|
||||
}
|
||||
|
||||
func appendArrayQuotedBytes(b, v []byte) []byte {
|
||||
b = append(b, '"')
|
||||
for {
|
||||
i := bytes.IndexAny(v, `"\`)
|
||||
if i < 0 {
|
||||
b = append(b, v...)
|
||||
break
|
||||
}
|
||||
if i > 0 {
|
||||
b = append(b, v[:i]...)
|
||||
}
|
||||
b = append(b, '\\', v[i])
|
||||
v = v[i+1:]
|
||||
}
|
||||
return append(b, '"')
|
||||
}
|
||||
|
||||
func appendValue(b []byte, v driver.Value) ([]byte, error) {
|
||||
return append(b, encode(nil, v, 0)...), nil
|
||||
}
|
||||
|
||||
// parseArray extracts the dimensions and elements of an array represented in
|
||||
// text format. Only representations emitted by the backend are supported.
|
||||
// Notably, whitespace around brackets and delimiters is significant, and NULL
|
||||
// is case-sensitive.
|
||||
//
|
||||
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
|
||||
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
|
||||
var depth, i int
|
||||
|
||||
if len(src) < 1 || src[0] != '{' {
|
||||
return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
|
||||
}
|
||||
|
||||
Open:
|
||||
for i < len(src) {
|
||||
switch src[i] {
|
||||
case '{':
|
||||
depth++
|
||||
i++
|
||||
case '}':
|
||||
elems = make([][]byte, 0)
|
||||
goto Close
|
||||
default:
|
||||
break Open
|
||||
}
|
||||
}
|
||||
dims = make([]int, i)
|
||||
|
||||
Element:
|
||||
for i < len(src) {
|
||||
switch src[i] {
|
||||
case '{':
|
||||
depth++
|
||||
dims[depth-1] = 0
|
||||
i++
|
||||
case '"':
|
||||
var elem = []byte{}
|
||||
var escape bool
|
||||
for i++; i < len(src); i++ {
|
||||
if escape {
|
||||
elem = append(elem, src[i])
|
||||
escape = false
|
||||
} else {
|
||||
switch src[i] {
|
||||
default:
|
||||
elem = append(elem, src[i])
|
||||
case '\\':
|
||||
escape = true
|
||||
case '"':
|
||||
elems = append(elems, elem)
|
||||
i++
|
||||
break Element
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
for start := i; i < len(src); i++ {
|
||||
if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
|
||||
elem := src[start:i]
|
||||
if len(elem) == 0 {
|
||||
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
|
||||
}
|
||||
if bytes.Equal(elem, []byte("NULL")) {
|
||||
elem = nil
|
||||
}
|
||||
elems = append(elems, elem)
|
||||
break Element
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i < len(src) {
|
||||
if bytes.HasPrefix(src[i:], del) {
|
||||
dims[depth-1]++
|
||||
i += len(del)
|
||||
goto Element
|
||||
} else if src[i] == '}' {
|
||||
dims[depth-1]++
|
||||
depth--
|
||||
i++
|
||||
} else {
|
||||
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
|
||||
}
|
||||
}
|
||||
|
||||
Close:
|
||||
for i < len(src) {
|
||||
if src[i] == '}' && depth > 0 {
|
||||
depth--
|
||||
i++
|
||||
} else {
|
||||
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
|
||||
}
|
||||
}
|
||||
if depth > 0 {
|
||||
err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
|
||||
}
|
||||
if err == nil {
|
||||
for _, d := range dims {
|
||||
if (len(elems) % d) != 0 {
|
||||
err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
|
||||
dims, elems, err := parseArray(src, del)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dims) > 1 {
|
||||
return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
|
||||
}
|
||||
return elems, err
|
||||
}
|
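The doc comment on Array already shows the intended call pattern; the sketch
below only fleshes it out into a runnable program. It assumes a reachable
PostgreSQL instance, a hypothetical connection string, and a table t with an
integer id column.

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq" // registers the "postgres" driver and provides pq.Array
)

func main() {
	// Hypothetical connection string; adjust for your environment.
	db, err := sql.Open("postgres", "postgres://postgres@localhost/postgres?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pass a Go slice as an array parameter ([]int64 hits the Int64Array fast path).
	rows, err := db.Query(`SELECT id FROM t WHERE id = ANY($1)`, pq.Array([]int64{235, 401}))
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			log.Fatal(err)
		}
		fmt.Println("matched id:", id)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}

	// Scan a PostgreSQL array back into a Go slice.
	var ids []int64
	if err := db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&ids)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ids) // [235 401]
}
```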
1153 vendor/github.com/lib/pq/array_test.go generated vendored Normal file (diff suppressed because it is too large)
435 vendor/github.com/lib/pq/bench_test.go generated vendored Normal file

New vendored file with the package's benchmarks, run via the standard
go test -bench flags: BenchmarkSelectString / BenchmarkSelectSeries and their
prepared-statement variants against a live server; "Mock" variants that replay
recorded protocol responses through a circularConn fake net.Conn; encode and
decode benchmarks for int64, float64, bytea, bool and timestamptz values
(including a multi-threaded timestamptz decode); location-cache benchmarks;
and a result-parsing stress test (BenchmarkResultParsing).
91 vendor/github.com/lib/pq/buf.go generated vendored Normal file

package pq

import (
	"bytes"
	"encoding/binary"

	"github.com/lib/pq/oid"
)

type readBuf []byte

func (b *readBuf) int32() (n int) {
	n = int(int32(binary.BigEndian.Uint32(*b)))
	*b = (*b)[4:]
	return
}

func (b *readBuf) oid() (n oid.Oid) {
	n = oid.Oid(binary.BigEndian.Uint32(*b))
	*b = (*b)[4:]
	return
}

// N.B: this is actually an unsigned 16-bit integer, unlike int32
func (b *readBuf) int16() (n int) {
	n = int(binary.BigEndian.Uint16(*b))
	*b = (*b)[2:]
	return
}

func (b *readBuf) string() string {
	i := bytes.IndexByte(*b, 0)
	if i < 0 {
		errorf("invalid message format; expected string terminator")
	}
	s := (*b)[:i]
	*b = (*b)[i+1:]
	return string(s)
}

func (b *readBuf) next(n int) (v []byte) {
	v = (*b)[:n]
	*b = (*b)[n:]
	return
}

func (b *readBuf) byte() byte {
	return b.next(1)[0]
}

type writeBuf struct {
	buf []byte
	pos int
}

func (b *writeBuf) int32(n int) {
	x := make([]byte, 4)
	binary.BigEndian.PutUint32(x, uint32(n))
	b.buf = append(b.buf, x...)
}

func (b *writeBuf) int16(n int) {
	x := make([]byte, 2)
	binary.BigEndian.PutUint16(x, uint16(n))
	b.buf = append(b.buf, x...)
}

func (b *writeBuf) string(s string) {
	b.buf = append(b.buf, (s + "\000")...)
}

func (b *writeBuf) byte(c byte) {
	b.buf = append(b.buf, c)
}

func (b *writeBuf) bytes(v []byte) {
	b.buf = append(b.buf, v...)
}

func (b *writeBuf) wrap() []byte {
	p := b.buf[b.pos:]
	binary.BigEndian.PutUint32(p, uint32(len(p)))
	return b.buf
}

func (b *writeBuf) next(c byte) {
	p := b.buf[b.pos:]
	binary.BigEndian.PutUint32(p, uint32(len(p)))
	b.pos = len(b.buf) + 1
	b.buf = append(b.buf, c, 0, 0, 0, 0)
}
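readBuf and writeBuf implement the wire framing the driver uses: a one-byte
message tag followed by a big-endian int32 length that counts itself and the
payload but not the tag, which is exactly what next and wrap reserve and
back-patch. The standalone sketch below does not use pq's unexported types; it
is only meant to make that layout concrete, building a simple-query ('Q')
frame the same shape writeBuf.string plus writeBuf.wrap would produce.

```
package main

import (
	"encoding/binary"
	"fmt"
)

// frame appends a tag byte, a big-endian length (counting itself and the
// payload, but not the tag) and then the payload.
func frame(tag byte, payload []byte) []byte {
	buf := make([]byte, 0, 5+len(payload))
	buf = append(buf, tag, 0, 0, 0, 0) // reserve four bytes for the length
	buf = append(buf, payload...)
	binary.BigEndian.PutUint32(buf[1:5], uint32(len(buf)-1))
	return buf
}

func main() {
	// A simple Query message carrying a NUL-terminated SQL string.
	msg := frame('Q', append([]byte("SELECT 1"), 0))
	fmt.Printf("% x\n", msg)
}
```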
3 vendor/github.com/lib/pq/certs/README generated vendored Normal file

This directory contains certificates and private keys for testing some
SSL-related functionality in Travis. Do NOT use these certificates for
anything other than testing.

19 vendor/github.com/lib/pq/certs/bogus_root.crt generated vendored Normal file
69 vendor/github.com/lib/pq/certs/postgresql.crt generated vendored Normal file
15 vendor/github.com/lib/pq/certs/postgresql.key generated vendored Normal file
24 vendor/github.com/lib/pq/certs/root.crt generated vendored Normal file
81 vendor/github.com/lib/pq/certs/server.crt generated vendored Normal file
27 vendor/github.com/lib/pq/certs/server.key generated vendored Normal file

The PEM bodies are test-only material: a "pq CA" root certificate, a server
certificate and key for CN=postgres, a client certificate and key for
CN=pqgosslcert, and an unrelated "bogus" root certificate.
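These certificates exercise pq's TLS-related connection parameters. As a
sketch only: the host, user, database and file paths below are illustrative,
and the parameters are the libpq-style sslmode, sslrootcert, sslcert and
sslkey settings that lib/pq accepts in a key=value connection string.

```
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	// sslmode=verify-full checks the server certificate against the given
	// root CA and verifies the host name; sslcert/sslkey present a client
	// certificate. All values here are placeholders.
	dsn := "host=localhost user=pqgossltest dbname=pqgossltest " +
		"sslmode=verify-full " +
		"sslrootcert=certs/root.crt " +
		"sslcert=certs/postgresql.crt sslkey=certs/postgresql.key"

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("TLS connection established")
}
```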
1862 vendor/github.com/lib/pq/conn.go generated vendored Normal file (diff suppressed because it is too large)
1448 vendor/github.com/lib/pq/conn_test.go generated vendored Normal file (diff suppressed because it is too large)
267 vendor/github.com/lib/pq/copy.go generated vendored Normal file

New vendored file implementing COPY ... FROM STDIN support. CopyIn and
CopyInSchema build a quoted "COPY table (columns) FROM STDIN" statement that
must be prepared inside a transaction. The resulting copyin statement encodes
each Exec call as tab-separated text, buffers it (64 KiB buffer, flushed once
it grows past 63 KiB) and streams it to the server as CopyData messages, while
a response goroutine collects errors asynchronously; callers therefore finish
with a final Exec() without arguments, or Close, to sync the stream and pick
up any pending error. Only the text format is supported: binary COPY, COPY TO,
and COPY outside a transaction are all rejected.
465
vendor/github.com/lib/pq/copy_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,465 @@
|
|||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCopyInStmt(t *testing.T) {
|
||||
var stmt string
|
||||
stmt = CopyIn("table name")
|
||||
if stmt != `COPY "table name" () FROM STDIN` {
|
||||
t.Fatal(stmt)
|
||||
}
|
||||
|
||||
stmt = CopyIn("table name", "column 1", "column 2")
|
||||
if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` {
|
||||
t.Fatal(stmt)
|
||||
}
|
||||
|
||||
stmt = CopyIn(`table " name """`, `co"lumn""`)
|
||||
if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` {
|
||||
t.Fatal(stmt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyInSchemaStmt(t *testing.T) {
|
||||
var stmt string
|
||||
stmt = CopyInSchema("schema name", "table name")
|
||||
if stmt != `COPY "schema name"."table name" () FROM STDIN` {
|
||||
t.Fatal(stmt)
|
||||
}
|
||||
|
||||
stmt = CopyInSchema("schema name", "table name", "column 1", "column 2")
|
||||
if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` {
|
||||
t.Fatal(stmt)
|
||||
}
|
||||
|
||||
stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`)
|
||||
if stmt != `COPY "schema "" name """"""".`+
|
||||
`"table "" name """"""" ("co""lumn""""") FROM STDIN` {
|
||||
t.Fatal(stmt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyInMultipleValues(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
longString := strings.Repeat("#", 500)
|
||||
|
||||
for i := 0; i < 500; i++ {
|
||||
_, err = stmt.Exec(int64(i), longString)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = stmt.Exec()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = stmt.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var num int
|
||||
err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if num != 500 {
|
||||
t.Fatalf("expected 500 items, not %d", num)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyInRaiseStmtTrigger(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
if getServerVersion(t, db) < 90000 {
|
||||
var exists int
|
||||
err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists)
|
||||
if err == sql.ErrNoRows {
|
||||
t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger")
|
||||
} else if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = txn.Exec(`
|
||||
CREATE OR REPLACE FUNCTION pg_temp.temptest()
|
||||
RETURNS trigger AS
|
||||
$BODY$ begin
|
||||
raise notice 'Hello world';
|
||||
return new;
|
||||
end $BODY$
|
||||
LANGUAGE plpgsql`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = txn.Exec(`
|
||||
CREATE TRIGGER temptest_trigger
|
||||
BEFORE INSERT
|
||||
ON temp
|
||||
FOR EACH ROW
|
||||
EXECUTE PROCEDURE pg_temp.temptest()`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
longString := strings.Repeat("#", 500)
|
||||
|
||||
_, err = stmt.Exec(int64(1), longString)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = stmt.Exec()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = stmt.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var num int
|
||||
err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if num != 1 {
|
||||
t.Fatalf("expected 1 items, not %d", num)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyInTypes(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = stmt.Exec()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = stmt.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var num int
|
||||
var text string
|
||||
var blob []byte
|
||||
var nothing sql.NullString
|
||||
|
||||
err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, ¬hing)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if num != 1234567890 {
|
||||
t.Fatal("unexpected result", num)
|
||||
}
|
||||
if text != "Héllö\n ☃!\r\t\\" {
|
||||
t.Fatal("unexpected result", text)
|
||||
}
|
||||
if bytes.Compare(blob, []byte{0, 255, 9, 10, 13}) != 0 {
|
||||
t.Fatal("unexpected result", blob)
|
||||
}
|
||||
if nothing.Valid {
|
||||
t.Fatal("unexpected result", nothing.String)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyInWrongType(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stmt, err := txn.Prepare(CopyIn("temp", "num"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
_, err = stmt.Exec("Héllö\n ☃!\r\t\\")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = stmt.Exec()
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" {
|
||||
t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyOutsideOfTxnError(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
_, err := db.Prepare(CopyIn("temp", "num"))
|
||||
if err == nil {
|
||||
t.Fatal("COPY outside of transaction did not return an error")
|
||||
}
|
||||
if err != errCopyNotSupportedOutsideTxn {
|
||||
t.Fatalf("expected %s, got %s", err, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyInBinaryError(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary")
|
||||
if err != errBinaryCopyNotSupported {
|
||||
t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err)
|
||||
}
|
||||
// check that the protocol is in a valid state
|
||||
err = txn.Rollback()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyFromError(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = txn.Prepare("COPY temp (num) TO STDOUT")
|
||||
if err != errCopyToNotSupported {
|
||||
t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err)
|
||||
}
|
||||
// check that the protocol is in a valid state
|
||||
err = txn.Rollback()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopySyntaxError(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Prepare("COPY ")
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
if pge := err.(*Error); pge.Code.Name() != "syntax_error" {
|
||||
t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge)
|
||||
}
|
||||
// check that the protocol is in a valid state
|
||||
err = txn.Rollback()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests for connection errors in copyin.resploop()
|
||||
func TestCopyRespLoopConnectionError(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
var pid int
|
||||
err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (a int)")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stmt, err := txn.Prepare(CopyIn("temp", "a"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
_, err = db.Exec("SELECT pg_terminate_backend($1)", pid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if getServerVersion(t, db) < 90500 {
|
||||
// We have to try and send something over, since postgres before
|
||||
// version 9.5 won't process SIGTERMs while it's waiting for
|
||||
// CopyData/CopyEnd messages; see tcop/postgres.c.
|
||||
_, err = stmt.Exec(1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
_, err = stmt.Exec()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
pge, ok := err.(*Error)
|
||||
if !ok {
|
||||
if err == driver.ErrBadConn {
|
||||
// likely an EPIPE
|
||||
} else {
|
||||
t.Fatalf("expected *pq.Error or driver.ErrBadConn, got %+#v", err)
|
||||
}
|
||||
} else if pge.Code.Name() != "admin_shutdown" {
|
||||
t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name())
|
||||
}
|
||||
|
||||
_ = stmt.Close()
|
||||
}
|
||||
|
||||
func BenchmarkCopyIn(b *testing.B) {
|
||||
db := openTestConn(b)
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err = stmt.Exec(int64(i), "hello world!")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = stmt.Exec()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = stmt.Close()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
var num int
|
||||
err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if num != b.N {
|
||||
b.Fatalf("expected %d items, not %d", b.N, num)
|
||||
}
|
||||
}
|
212
vendor/github.com/lib/pq/doc.go
generated
vendored
Normal file
|
@@ -0,0 +1,212 @@
|
|||
/*
|
||||
Package pq is a pure Go Postgres driver for the database/sql package.
|
||||
|
||||
In most cases clients will use the database/sql package instead of
|
||||
using this package directly. For example:
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
age := 21
|
||||
rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
|
||||
…
|
||||
}
|
||||
|
||||
You can also connect to a database using a URL. For example:
|
||||
|
||||
db, err := sql.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full")
|
||||
|
||||
|
||||
Connection String Parameters
|
||||
|
||||
|
||||
Similarly to libpq, when establishing a connection using pq you are expected to
|
||||
supply a connection string containing zero or more parameters.
|
||||
A subset of the connection parameters supported by libpq are also supported by pq.
|
||||
Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
|
||||
directly in the connection string. This is different from libpq, which does not allow
|
||||
run-time parameters in the connection string, instead requiring you to supply
|
||||
them in the options parameter.
|
||||
|
||||
For compatibility with libpq, the following special connection parameters are
|
||||
supported:
|
||||
|
||||
* dbname - The name of the database to connect to
|
||||
* user - The user to sign in as
|
||||
* password - The user's password
|
||||
* host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost)
|
||||
* port - The port to bind to. (default is 5432)
|
||||
* sslmode - Whether or not to use SSL (default is require, this is not the default for libpq)
|
||||
* fallback_application_name - An application_name to fall back to if one isn't provided.
|
||||
* connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely.
|
||||
* sslcert - Cert file location. The file must contain PEM encoded data.
|
||||
* sslkey - Key file location. The file must contain PEM encoded data.
|
||||
* sslrootcert - The location of the root certificate file. The file must contain PEM encoded data.
|
||||
|
||||
Valid values for sslmode are:
|
||||
|
||||
* disable - No SSL
|
||||
* require - Always SSL (skip verification)
|
||||
* verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA)
|
||||
* verify-full - Always SSL (verify that the certificate presented by the server was signed by a trusted CA and the server host name matches the one in the certificate)
|
||||
|
||||
See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
|
||||
for more information about connection string parameters.
|
||||
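For illustration only (the certificate path below is a placeholder, not part of
the original documentation), a connection that verifies the server certificate
against a custom root CA could be opened like this:

	db, err := sql.Open("postgres",
		"user=pqgotest dbname=pqgotest sslmode=verify-ca sslrootcert=/path/to/root.crt")
	if err != nil {
		log.Fatal(err)
	}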
|
||||
Use single quotes for values that contain whitespace:
|
||||
|
||||
"user=pqgotest password='with spaces'"
|
||||
|
||||
A backslash will escape the next character in values:
|
||||
|
||||
"user=space\ man password='it\'s valid'
|
||||
|
||||
Note that the connection parameter client_encoding (which sets the
|
||||
text encoding for the connection) may be set but must be "UTF8",
|
||||
matching with the same rules as Postgres. It is an error to provide
|
||||
any other value.
|
||||
|
||||
In addition to the parameters listed above, any run-time parameter that can be
|
||||
set at backend start time can be set in the connection string. For more
|
||||
information, see
|
||||
http://www.postgresql.org/docs/current/static/runtime-config.html.
|
||||
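As a minimal sketch (the schema name here is illustrative), a run-time
parameter such as search_path can be supplied alongside the regular
connection parameters:

	db, err := sql.Open("postgres",
		"user=pqgotest dbname=pqgotest sslmode=disable search_path=myschema")
	if err != nil {
		log.Fatal(err)
	}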
|
||||
Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
|
||||
supported by libpq are also supported by pq. If any of the environment
|
||||
variables not supported by pq are set, pq will panic during connection
|
||||
establishment. Environment variables have a lower precedence than explicitly
|
||||
provided connection parameters.
|
||||
|
||||
The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
|
||||
is supported, but on Windows PGPASSFILE must be specified explicitly.
|
||||
|
||||
Queries
|
||||
|
||||
database/sql does not dictate any specific format for parameter
|
||||
markers in query strings, and pq uses the Postgres-native ordinal markers,
|
||||
as shown above. The same marker can be reused for the same parameter:
|
||||
|
||||
rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
|
||||
OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
|
||||
|
||||
pq does not support the LastInsertId() method of the Result type in database/sql.
|
||||
To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
|
||||
RETURNING clause with a standard Query or QueryRow call:
|
||||
|
||||
var userid int
|
||||
err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
|
||||
VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
|
||||
|
||||
For more details on RETURNING, see the Postgres documentation:
|
||||
|
||||
http://www.postgresql.org/docs/current/static/sql-insert.html
|
||||
http://www.postgresql.org/docs/current/static/sql-update.html
|
||||
http://www.postgresql.org/docs/current/static/sql-delete.html
|
||||
|
||||
For additional instructions on querying see the documentation for the database/sql package.
|
||||
|
||||
Errors
|
||||
|
||||
pq may return errors of type *pq.Error which can be interrogated for error details:
|
||||
|
||||
if err, ok := err.(*pq.Error); ok {
|
||||
fmt.Println("pq error:", err.Code.Name())
|
||||
}
|
||||
|
||||
See the pq.Error type for details.
|
||||
|
||||
|
||||
Bulk imports
|
||||
|
||||
You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
|
||||
pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
|
||||
handle can then be repeatedly "executed" to copy data into the target table.
|
||||
After all data has been processed you should call Exec() once with no arguments
|
||||
to flush all buffered data. Any call to Exec() might return an error which
|
||||
should be handled appropriately, but because of the internal buffering an error
|
||||
returned by Exec() might not be related to the data passed in the call that
|
||||
failed.
|
||||
|
||||
CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
|
||||
explicit transaction in pq.
|
||||
|
||||
Usage example:
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, user := range users {
|
||||
_, err = stmt.Exec(user.Name, int64(user.Age))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = stmt.Exec()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = stmt.Close()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = txn.Commit()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
|
||||
Notifications
|
||||
|
||||
|
||||
PostgreSQL supports a simple publish/subscribe model over database
|
||||
connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
|
||||
for more information about the general mechanism.
|
||||
|
||||
To start listening for notifications, you first have to open a new connection
|
||||
to the database by calling NewListener. This connection can not be used for
|
||||
anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
|
||||
channel"; once a notification channel is open, a notification generated on that
|
||||
channel will effect a send on the Listener.Notify channel. A notification
|
||||
channel will remain open until Unlisten is called, though connection loss might
|
||||
result in some notifications being lost. To solve this problem, Listener sends
|
||||
a nil pointer over the Notify channel any time the connection is re-established
|
||||
following a connection loss. The application can get information about the
|
||||
state of the underlying connection by setting an event callback in the call to
|
||||
NewListener.
|
||||
|
||||
A single Listener can safely be used from concurrent goroutines, which means
|
||||
that there is often no need to create more than one Listener in your
|
||||
application. However, a Listener is always connected to a single database, so
|
||||
you will need to create a new Listener instance for every database you want to
|
||||
receive notifications in.
|
||||
|
||||
The channel name in both Listen and Unlisten is case sensitive, and can contain
|
||||
any characters legal in an identifier (see
|
||||
http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
|
||||
for more information). Note that the channel name will be truncated to 63
|
||||
bytes by the PostgreSQL server.
|
||||
|
||||
You can find a complete, working example of Listener usage at
|
||||
http://godoc.org/github.com/lib/pq/listen_example.
|
||||
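As a minimal sketch of that usage (the connection string, channel name and
reconnect intervals below are placeholders; the listen_example linked above
remains the authoritative reference):

	listener := pq.NewListener(conninfo, 10*time.Second, time.Minute, nil)
	if err := listener.Listen("events"); err != nil {
		log.Fatal(err)
	}
	for n := range listener.Notify {
		if n == nil {
			// A nil notification is sent after the connection is
			// re-established; the application may need to re-check any
			// state it could have missed.
			continue
		}
		log.Printf("notification on channel %s: %s", n.Channel, n.Extra)
	}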
|
||||
*/
|
||||
package pq
|
589
vendor/github.com/lib/pq/encode.go
generated
vendored
Normal file
|
@@ -0,0 +1,589 @@
|
|||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq/oid"
|
||||
)
|
||||
|
||||
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
|
||||
switch v := x.(type) {
|
||||
case []byte:
|
||||
return v
|
||||
default:
|
||||
return encode(parameterStatus, x, oid.T_unknown)
|
||||
}
|
||||
}
|
||||
|
||||
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
|
||||
switch v := x.(type) {
|
||||
case int64:
|
||||
return strconv.AppendInt(nil, v, 10)
|
||||
case float64:
|
||||
return strconv.AppendFloat(nil, v, 'f', -1, 64)
|
||||
case []byte:
|
||||
if pgtypOid == oid.T_bytea {
|
||||
return encodeBytea(parameterStatus.serverVersion, v)
|
||||
}
|
||||
|
||||
return v
|
||||
case string:
|
||||
if pgtypOid == oid.T_bytea {
|
||||
return encodeBytea(parameterStatus.serverVersion, []byte(v))
|
||||
}
|
||||
|
||||
return []byte(v)
|
||||
case bool:
|
||||
return strconv.AppendBool(nil, v)
|
||||
case time.Time:
|
||||
return formatTs(v)
|
||||
|
||||
default:
|
||||
errorf("encode: unknown type for %T", v)
|
||||
}
|
||||
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
|
||||
switch f {
|
||||
case formatBinary:
|
||||
return binaryDecode(parameterStatus, s, typ)
|
||||
case formatText:
|
||||
return textDecode(parameterStatus, s, typ)
|
||||
default:
|
||||
panic("not reached")
|
||||
}
|
||||
}
|
||||
|
||||
func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
|
||||
switch typ {
|
||||
case oid.T_bytea:
|
||||
return s
|
||||
case oid.T_int8:
|
||||
return int64(binary.BigEndian.Uint64(s))
|
||||
case oid.T_int4:
|
||||
return int64(int32(binary.BigEndian.Uint32(s)))
|
||||
case oid.T_int2:
|
||||
return int64(int16(binary.BigEndian.Uint16(s)))
|
||||
|
||||
default:
|
||||
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
|
||||
}
|
||||
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
|
||||
switch typ {
|
||||
case oid.T_char, oid.T_varchar, oid.T_text:
|
||||
return string(s)
|
||||
case oid.T_bytea:
|
||||
b, err := parseBytea(s)
|
||||
if err != nil {
|
||||
errorf("%s", err)
|
||||
}
|
||||
return b
|
||||
case oid.T_timestamptz:
|
||||
return parseTs(parameterStatus.currentLocation, string(s))
|
||||
case oid.T_timestamp, oid.T_date:
|
||||
return parseTs(nil, string(s))
|
||||
case oid.T_time:
|
||||
return mustParse("15:04:05", typ, s)
|
||||
case oid.T_timetz:
|
||||
return mustParse("15:04:05-07", typ, s)
|
||||
case oid.T_bool:
|
||||
return s[0] == 't'
|
||||
case oid.T_int8, oid.T_int4, oid.T_int2:
|
||||
i, err := strconv.ParseInt(string(s), 10, 64)
|
||||
if err != nil {
|
||||
errorf("%s", err)
|
||||
}
|
||||
return i
|
||||
case oid.T_float4, oid.T_float8:
|
||||
bits := 64
|
||||
if typ == oid.T_float4 {
|
||||
bits = 32
|
||||
}
|
||||
f, err := strconv.ParseFloat(string(s), bits)
|
||||
if err != nil {
|
||||
errorf("%s", err)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// appendEncodedText encodes item in text format as required by COPY
|
||||
// and appends to buf
|
||||
func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
|
||||
switch v := x.(type) {
|
||||
case int64:
|
||||
return strconv.AppendInt(buf, v, 10)
|
||||
case float64:
|
||||
return strconv.AppendFloat(buf, v, 'f', -1, 64)
|
||||
case []byte:
|
||||
encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
|
||||
return appendEscapedText(buf, string(encodedBytea))
|
||||
case string:
|
||||
return appendEscapedText(buf, v)
|
||||
case bool:
|
||||
return strconv.AppendBool(buf, v)
|
||||
case time.Time:
|
||||
return append(buf, formatTs(v)...)
|
||||
case nil:
|
||||
return append(buf, "\\N"...)
|
||||
default:
|
||||
errorf("encode: unknown type for %T", v)
|
||||
}
|
||||
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func appendEscapedText(buf []byte, text string) []byte {
|
||||
escapeNeeded := false
|
||||
startPos := 0
|
||||
var c byte
|
||||
|
||||
// check if we need to escape
|
||||
for i := 0; i < len(text); i++ {
|
||||
c = text[i]
|
||||
if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
|
||||
escapeNeeded = true
|
||||
startPos = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if !escapeNeeded {
|
||||
return append(buf, text...)
|
||||
}
|
||||
|
||||
// copy till first char to escape, iterate the rest
|
||||
result := append(buf, text[:startPos]...)
|
||||
for i := startPos; i < len(text); i++ {
|
||||
c = text[i]
|
||||
switch c {
|
||||
case '\\':
|
||||
result = append(result, '\\', '\\')
|
||||
case '\n':
|
||||
result = append(result, '\\', 'n')
|
||||
case '\r':
|
||||
result = append(result, '\\', 'r')
|
||||
case '\t':
|
||||
result = append(result, '\\', 't')
|
||||
default:
|
||||
result = append(result, c)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func mustParse(f string, typ oid.Oid, s []byte) time.Time {
|
||||
str := string(s)
|
||||
|
||||
// check for a 30-minute-offset timezone
|
||||
if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
|
||||
str[len(str)-3] == ':' {
|
||||
f += ":00"
|
||||
}
|
||||
t, err := time.Parse(f, str)
|
||||
if err != nil {
|
||||
errorf("decode: %s", err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
var errInvalidTimestamp = errors.New("invalid timestamp")
|
||||
|
||||
type timestampParser struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *timestampParser) expect(str string, char byte, pos int) {
|
||||
if p.err != nil {
|
||||
return
|
||||
}
|
||||
if pos+1 > len(str) {
|
||||
p.err = errInvalidTimestamp
|
||||
return
|
||||
}
|
||||
if c := str[pos]; c != char && p.err == nil {
|
||||
p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
|
||||
if p.err != nil {
|
||||
return 0
|
||||
}
|
||||
if begin < 0 || end < 0 || begin > end || end > len(str) {
|
||||
p.err = errInvalidTimestamp
|
||||
return 0
|
||||
}
|
||||
result, err := strconv.Atoi(str[begin:end])
|
||||
if err != nil {
|
||||
if p.err == nil {
|
||||
p.err = fmt.Errorf("expected number; got '%v'", str)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// The location cache caches the time zones typically used by the client.
|
||||
type locationCache struct {
|
||||
cache map[int]*time.Location
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// All connections share the same list of timezones. Benchmarking shows that
|
||||
// about 5% speed could be gained by putting the cache in the connection and
|
||||
// losing the mutex, at the cost of a small amount of memory and a somewhat
|
||||
// significant increase in code complexity.
|
||||
var globalLocationCache = newLocationCache()
|
||||
|
||||
func newLocationCache() *locationCache {
|
||||
return &locationCache{cache: make(map[int]*time.Location)}
|
||||
}
|
||||
|
||||
// Returns the cached timezone for the specified offset, creating and caching
|
||||
// it if necessary.
|
||||
func (c *locationCache) getLocation(offset int) *time.Location {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
location, ok := c.cache[offset]
|
||||
if !ok {
|
||||
location = time.FixedZone("", offset)
|
||||
c.cache[offset] = location
|
||||
}
|
||||
|
||||
return location
|
||||
}
|
||||
|
||||
var infinityTsEnabled = false
|
||||
var infinityTsNegative time.Time
|
||||
var infinityTsPositive time.Time
|
||||
|
||||
const (
|
||||
infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
|
||||
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
|
||||
)
|
||||
|
||||
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
|
||||
// "infinity" "timestamp"s.
|
||||
//
|
||||
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
|
||||
// []byte("-infinity") and []byte("infinity") respectively, and potentially
|
||||
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
|
||||
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
|
||||
//
|
||||
// Once EnableInfinityTs has been called, all connections created using this
|
||||
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
|
||||
// "timestamp with time zone" and "date" types to the predefined minimum and
|
||||
// maximum times, respectively. When encoding time.Time values, any time which
|
||||
// equals or precedes the predefined minimum time will be encoded to
|
||||
// "-infinity". Any values at or past the maximum time will similarly be
|
||||
// encoded to "infinity".
|
||||
//
|
||||
// If EnableInfinityTs is called with negative >= positive, it will panic.
|
||||
// Calling EnableInfinityTs after a connection has been established results in
|
||||
// undefined behavior. If EnableInfinityTs is called more than once, it will
|
||||
// panic.
|
||||
func EnableInfinityTs(negative time.Time, positive time.Time) {
|
||||
if infinityTsEnabled {
|
||||
panic(infinityTsEnabledAlready)
|
||||
}
|
||||
if !negative.Before(positive) {
|
||||
panic(infinityTsNegativeMustBeSmaller)
|
||||
}
|
||||
infinityTsEnabled = true
|
||||
infinityTsNegative = negative
|
||||
infinityTsPositive = positive
|
||||
}
|
||||
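// Illustrative sketch (not part of the original source): EnableInfinityTs is
// typically called once at program start-up, before any connections are
// established. The bounds below are placeholders chosen only to satisfy
// negative < positive.
//
//	func init() {
//		pq.EnableInfinityTs(
//			time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC),
//			time.Date(3000, time.January, 1, 0, 0, 0, 0, time.UTC),
//		)
//	}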
|
||||
/*
|
||||
* Testing might want to toggle infinityTsEnabled
|
||||
*/
|
||||
func disableInfinityTs() {
|
||||
infinityTsEnabled = false
|
||||
}
|
||||
|
||||
// This is a time function specific to the Postgres default DateStyle
|
||||
// setting ("ISO, MDY"), the only one we currently support. This
|
||||
// accounts for the discrepancies between the parsing available with
|
||||
// time.Parse and the Postgres date formatting quirks.
|
||||
func parseTs(currentLocation *time.Location, str string) interface{} {
|
||||
switch str {
|
||||
case "-infinity":
|
||||
if infinityTsEnabled {
|
||||
return infinityTsNegative
|
||||
}
|
||||
return []byte(str)
|
||||
case "infinity":
|
||||
if infinityTsEnabled {
|
||||
return infinityTsPositive
|
||||
}
|
||||
return []byte(str)
|
||||
}
|
||||
t, err := ParseTimestamp(currentLocation, str)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
|
||||
// currentLocation iff that time's offset agrees with the offset sent from the
|
||||
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
|
||||
// fixed offset provided by the Postgres server.
|
||||
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
|
||||
p := timestampParser{}
|
||||
|
||||
monSep := strings.IndexRune(str, '-')
|
||||
// this is Gregorian year, not ISO Year
|
||||
// In Gregorian system, the year 1 BC is followed by AD 1
|
||||
year := p.mustAtoi(str, 0, monSep)
|
||||
daySep := monSep + 3
|
||||
month := p.mustAtoi(str, monSep+1, daySep)
|
||||
p.expect(str, '-', daySep)
|
||||
timeSep := daySep + 3
|
||||
day := p.mustAtoi(str, daySep+1, timeSep)
|
||||
|
||||
var hour, minute, second int
|
||||
if len(str) > monSep+len("01-01")+1 {
|
||||
p.expect(str, ' ', timeSep)
|
||||
minSep := timeSep + 3
|
||||
p.expect(str, ':', minSep)
|
||||
hour = p.mustAtoi(str, timeSep+1, minSep)
|
||||
secSep := minSep + 3
|
||||
p.expect(str, ':', secSep)
|
||||
minute = p.mustAtoi(str, minSep+1, secSep)
|
||||
secEnd := secSep + 3
|
||||
second = p.mustAtoi(str, secSep+1, secEnd)
|
||||
}
|
||||
remainderIdx := monSep + len("01-01 00:00:00") + 1
|
||||
// Three optional (but ordered) sections follow: the
|
||||
// fractional seconds, the time zone offset, and the BC
|
||||
// designation. We set them up here and adjust the other
|
||||
// offsets if the preceding sections exist.
|
||||
|
||||
nanoSec := 0
|
||||
tzOff := 0
|
||||
|
||||
if remainderIdx < len(str) && str[remainderIdx] == '.' {
|
||||
fracStart := remainderIdx + 1
|
||||
fracOff := strings.IndexAny(str[fracStart:], "-+ ")
|
||||
if fracOff < 0 {
|
||||
fracOff = len(str) - fracStart
|
||||
}
|
||||
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
|
||||
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
|
||||
|
||||
remainderIdx += fracOff + 1
|
||||
}
|
||||
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
|
||||
// time zone separator is always '-' or '+' (UTC is +00)
|
||||
var tzSign int
|
||||
switch c := str[tzStart]; c {
|
||||
case '-':
|
||||
tzSign = -1
|
||||
case '+':
|
||||
tzSign = +1
|
||||
default:
|
||||
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
|
||||
}
|
||||
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
|
||||
remainderIdx += 3
|
||||
var tzMin, tzSec int
|
||||
if remainderIdx < len(str) && str[remainderIdx] == ':' {
|
||||
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
|
||||
remainderIdx += 3
|
||||
}
|
||||
if remainderIdx < len(str) && str[remainderIdx] == ':' {
|
||||
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
|
||||
remainderIdx += 3
|
||||
}
|
||||
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
|
||||
}
|
||||
var isoYear int
|
||||
if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" {
|
||||
isoYear = 1 - year
|
||||
remainderIdx += 3
|
||||
} else {
|
||||
isoYear = year
|
||||
}
|
||||
if remainderIdx < len(str) {
|
||||
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
|
||||
}
|
||||
t := time.Date(isoYear, time.Month(month), day,
|
||||
hour, minute, second, nanoSec,
|
||||
globalLocationCache.getLocation(tzOff))
|
||||
|
||||
if currentLocation != nil {
|
||||
// Set the location of the returned Time based on the session's
|
||||
// TimeZone value, but only if the local time zone database agrees with
|
||||
// the remote database on the offset.
|
||||
lt := t.In(currentLocation)
|
||||
_, newOff := lt.Zone()
|
||||
if newOff == tzOff {
|
||||
t = lt
|
||||
}
|
||||
}
|
||||
|
||||
return t, p.err
|
||||
}
|
||||
|
||||
// formatTs formats t into a format postgres understands.
|
||||
func formatTs(t time.Time) []byte {
|
||||
if infinityTsEnabled {
|
||||
// t <= -infinity : ! (t > -infinity)
|
||||
if !t.After(infinityTsNegative) {
|
||||
return []byte("-infinity")
|
||||
}
|
||||
// t >= infinity : ! (t < infinity)
|
||||
if !t.Before(infinityTsPositive) {
|
||||
return []byte("infinity")
|
||||
}
|
||||
}
|
||||
return FormatTimestamp(t)
|
||||
}
|
||||
|
||||
// FormatTimestamp formats t into Postgres' text format for timestamps.
|
||||
func FormatTimestamp(t time.Time) []byte {
|
||||
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
|
||||
// minus sign preferred by Go.
|
||||
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
|
||||
bc := false
|
||||
if t.Year() <= 0 {
|
||||
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
|
||||
t = t.AddDate((-t.Year())*2+1, 0, 0)
|
||||
bc = true
|
||||
}
|
||||
b := []byte(t.Format(time.RFC3339Nano))
|
||||
|
||||
_, offset := t.Zone()
|
||||
offset = offset % 60
|
||||
if offset != 0 {
|
||||
// RFC3339Nano already printed the minus sign
|
||||
if offset < 0 {
|
||||
offset = -offset
|
||||
}
|
||||
|
||||
b = append(b, ':')
|
||||
if offset < 10 {
|
||||
b = append(b, '0')
|
||||
}
|
||||
b = strconv.AppendInt(b, int64(offset), 10)
|
||||
}
|
||||
|
||||
if bc {
|
||||
b = append(b, " BC"...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Parse a bytea value received from the server. Both "hex" and the legacy
|
||||
// "escape" format are supported.
|
||||
func parseBytea(s []byte) (result []byte, err error) {
|
||||
if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
|
||||
// bytea_output = hex
|
||||
s = s[2:] // trim off leading "\\x"
|
||||
result = make([]byte, hex.DecodedLen(len(s)))
|
||||
_, err := hex.Decode(result, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// bytea_output = escape
|
||||
for len(s) > 0 {
|
||||
if s[0] == '\\' {
|
||||
// escaped '\\'
|
||||
if len(s) >= 2 && s[1] == '\\' {
|
||||
result = append(result, '\\')
|
||||
s = s[2:]
|
||||
continue
|
||||
}
|
||||
|
||||
// '\\' followed by an octal number
|
||||
if len(s) < 4 {
|
||||
return nil, fmt.Errorf("invalid bytea sequence %v", s)
|
||||
}
|
||||
r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
|
||||
}
|
||||
result = append(result, byte(r))
|
||||
s = s[4:]
|
||||
} else {
|
||||
// We hit an unescaped, raw byte. Try to read in as many as
|
||||
// possible in one go.
|
||||
i := bytes.IndexByte(s, '\\')
|
||||
if i == -1 {
|
||||
result = append(result, s...)
|
||||
break
|
||||
}
|
||||
result = append(result, s[:i]...)
|
||||
s = s[i:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func encodeBytea(serverVersion int, v []byte) (result []byte) {
|
||||
if serverVersion >= 90000 {
|
||||
// Use the hex format if we know that the server supports it
|
||||
result = make([]byte, 2+hex.EncodedLen(len(v)))
|
||||
result[0] = '\\'
|
||||
result[1] = 'x'
|
||||
hex.Encode(result[2:], v)
|
||||
} else {
|
||||
// .. or resort to "escape"
|
||||
for _, b := range v {
|
||||
if b == '\\' {
|
||||
result = append(result, '\\', '\\')
|
||||
} else if b < 0x20 || b > 0x7e {
|
||||
result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
|
||||
} else {
|
||||
result = append(result, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// NullTime represents a time.Time that may be null. NullTime implements the
|
||||
// sql.Scanner interface so it can be used as a scan destination, similar to
|
||||
// sql.NullString.
|
||||
type NullTime struct {
|
||||
Time time.Time
|
||||
Valid bool // Valid is true if Time is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the Scanner interface.
|
||||
func (nt *NullTime) Scan(value interface{}) error {
|
||||
nt.Time, nt.Valid = value.(time.Time)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nt NullTime) Value() (driver.Value, error) {
|
||||
if !nt.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
return nt.Time, nil
|
||||
}
|
738
vendor/github.com/lib/pq/encode_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,738 @@
|
|||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq/oid"
|
||||
)
|
||||
|
||||
func TestScanTimestamp(t *testing.T) {
|
||||
var nt NullTime
|
||||
tn := time.Now()
|
||||
nt.Scan(tn)
|
||||
if !nt.Valid {
|
||||
t.Errorf("Expected Valid=false")
|
||||
}
|
||||
if nt.Time != tn {
|
||||
t.Errorf("Time value mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanNilTimestamp(t *testing.T) {
|
||||
var nt NullTime
|
||||
nt.Scan(nil)
|
||||
if nt.Valid {
|
||||
t.Errorf("Expected Valid=false")
|
||||
}
|
||||
}
|
||||
|
||||
var timeTests = []struct {
|
||||
str string
|
||||
timeval time.Time
|
||||
}{
|
||||
{"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
|
||||
{"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))},
|
||||
{"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000,
|
||||
time.FixedZone("", -7*60*60))},
|
||||
{"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
|
||||
time.FixedZone("", -7*60*60))},
|
||||
{"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0,
|
||||
time.FixedZone("", -(7*60*60+42*60)))},
|
||||
{"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0,
|
||||
time.FixedZone("", -(7*60*60+30*60+9)))},
|
||||
{"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
|
||||
time.FixedZone("", 7*60*60))},
|
||||
{"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
|
||||
{"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
|
||||
{"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000,
|
||||
time.FixedZone("", -7*60*60))},
|
||||
{"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
|
||||
{"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
|
||||
{"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
|
||||
{"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
|
||||
{"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
|
||||
{"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
|
||||
{"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
|
||||
}
|
||||
|
||||
// Test that parsing the string results in the expected value.
|
||||
func TestParseTs(t *testing.T) {
|
||||
for i, tt := range timeTests {
|
||||
val, err := ParseTimestamp(nil, tt.str)
|
||||
if err != nil {
|
||||
t.Errorf("%d: got error: %v", i, err)
|
||||
} else if val.String() != tt.timeval.String() {
|
||||
t.Errorf("%d: expected to parse %q into %q; got %q",
|
||||
i, tt.str, tt.timeval, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var timeErrorTests = []string{
|
||||
"2001",
|
||||
"2001-2-03",
|
||||
"2001-02-3",
|
||||
"2001-02-03 ",
|
||||
"2001-02-03 04",
|
||||
"2001-02-03 04:",
|
||||
"2001-02-03 04:05",
|
||||
"2001-02-03 04:05:",
|
||||
"2001-02-03 04:05:6",
|
||||
"2001-02-03 04:05:06.123 B",
|
||||
}
|
||||
|
||||
// Test that parsing the string results in an error.
|
||||
func TestParseTsErrors(t *testing.T) {
|
||||
for i, tt := range timeErrorTests {
|
||||
_, err := ParseTimestamp(nil, tt)
|
||||
if err == nil {
|
||||
t.Errorf("%d: expected an error from parsing: %v", i, tt)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now test that sending the value into the database and parsing it back
|
||||
// returns the same time.Time value.
|
||||
func TestEncodeAndParseTs(t *testing.T) {
|
||||
db, err := openTestConnConninfo("timezone='Etc/UTC'")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
for i, tt := range timeTests {
|
||||
var dbstr string
|
||||
err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr)
|
||||
if err != nil {
|
||||
t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err)
|
||||
continue
|
||||
}
|
||||
|
||||
val, err := ParseTimestamp(nil, dbstr)
|
||||
if err != nil {
|
||||
t.Errorf("%d: could not parse value %q: %s", i, dbstr, err)
|
||||
continue
|
||||
}
|
||||
val = val.In(tt.timeval.Location())
|
||||
if val.String() != tt.timeval.String() {
|
||||
t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var formatTimeTests = []struct {
|
||||
time time.Time
|
||||
expected string
|
||||
}{
|
||||
{time.Time{}, "0001-01-01T00:00:00Z"},
|
||||
{time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03T04:05:06.123456789Z"},
|
||||
{time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03T04:05:06.123456789+02:00"},
|
||||
{time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03T04:05:06.123456789-06:00"},
|
||||
{time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03T04:05:06-07:30:09"},
|
||||
|
||||
{time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z"},
|
||||
{time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00"},
|
||||
{time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00"},
|
||||
|
||||
{time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z BC"},
|
||||
{time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00 BC"},
|
||||
{time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00 BC"},
|
||||
|
||||
{time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09"},
|
||||
{time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09 BC"},
|
||||
}
|
||||
|
||||
func TestFormatTs(t *testing.T) {
|
||||
for i, tt := range formatTimeTests {
|
||||
val := string(formatTs(tt.time))
|
||||
if val != tt.expected {
|
||||
t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampWithTimeZone(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// try several different locations, all included in Go's zoneinfo.zip
|
||||
for _, locName := range []string{
|
||||
"UTC",
|
||||
"America/Chicago",
|
||||
"America/New_York",
|
||||
"Australia/Darwin",
|
||||
"Australia/Perth",
|
||||
} {
|
||||
loc, err := time.LoadLocation(locName)
|
||||
if err != nil {
|
||||
t.Logf("Could not load time zone %s - skipping", locName)
|
||||
continue
|
||||
}
|
||||
|
||||
// Postgres timestamps have a resolution of 1 microsecond, so don't
|
||||
// use the full range of the Nanosecond argument
|
||||
refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc)
|
||||
|
||||
for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} {
|
||||
// Switch Postgres's timezone to test different output timestamp formats
|
||||
_, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var gotTime time.Time
|
||||
row := tx.QueryRow("select $1::timestamp with time zone", refTime)
|
||||
err = row.Scan(&gotTime)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !refTime.Equal(gotTime) {
|
||||
t.Errorf("timestamps not equal: %s != %s", refTime, gotTime)
|
||||
}
|
||||
|
||||
// check that the time zone is set correctly based on TimeZone
|
||||
pgLoc, err := time.LoadLocation(pgTimeZone)
|
||||
if err != nil {
|
||||
t.Logf("Could not load time zone %s - skipping", pgLoc)
|
||||
continue
|
||||
}
|
||||
translated := refTime.In(pgLoc)
|
||||
if translated.String() != gotTime.String() {
|
||||
t.Errorf("timestamps not equal: %s != %s", translated, gotTime)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampWithOutTimezone(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
test := func(ts, pgts string) {
|
||||
r, err := db.Query("SELECT $1::timestamp", pgts)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not run query: %v", err)
|
||||
}
|
||||
|
||||
n := r.Next()
|
||||
|
||||
if n != true {
|
||||
t.Fatal("Expected at least one row")
|
||||
}
|
||||
|
||||
var result time.Time
|
||||
err = r.Scan(&result)
|
||||
if err != nil {
|
||||
t.Fatalf("Did not expect error scanning row: %v", err)
|
||||
}
|
||||
|
||||
expected, err := time.Parse(time.RFC3339, ts)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse test time literal: %v", err)
|
||||
}
|
||||
|
||||
if !result.Equal(expected) {
|
||||
t.Fatalf("Expected time to match %v: got mismatch %v",
|
||||
expected, result)
|
||||
}
|
||||
|
||||
n = r.Next()
|
||||
if n != false {
|
||||
t.Fatal("Expected only one row")
|
||||
}
|
||||
}
|
||||
|
||||
test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00")
|
||||
|
||||
// Test higher precision time
|
||||
test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033")
|
||||
}
|
||||
|
||||
func TestInfinityTimestamp(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
var err error
|
||||
var resultT time.Time
|
||||
|
||||
expectedErrorStrPrefix := `sql: Scan error on column index 0: unsupported`
|
||||
type testCases []struct {
|
||||
Query string
|
||||
Param string
|
||||
ExpectedErrStrPrefix string
|
||||
ExpectedVal interface{}
|
||||
}
|
||||
tc := testCases{
|
||||
{"SELECT $1::timestamp", "-infinity", expectedErrorStrPrefix, "-infinity"},
|
||||
{"SELECT $1::timestamptz", "-infinity", expectedErrorStrPrefix, "-infinity"},
|
||||
{"SELECT $1::timestamp", "infinity", expectedErrorStrPrefix, "infinity"},
|
||||
{"SELECT $1::timestamptz", "infinity", expectedErrorStrPrefix, "infinity"},
|
||||
}
|
||||
// try to assert []byte to time.Time
|
||||
for _, q := range tc {
|
||||
err = db.QueryRow(q.Query, q.Param).Scan(&resultT)
|
||||
if !strings.HasPrefix(err.Error(), q.ExpectedErrStrPrefix) {
|
||||
t.Errorf("Scanning -/+infinity, expected error to have prefix %q, got %q", q.ExpectedErrStrPrefix, err)
|
||||
}
|
||||
}
|
||||
// yield []byte
|
||||
for _, q := range tc {
|
||||
var resultI interface{}
|
||||
err = db.QueryRow(q.Query, q.Param).Scan(&resultI)
|
||||
if err != nil {
|
||||
t.Errorf("Scanning -/+infinity, expected no error, got %q", err)
|
||||
}
|
||||
result, ok := resultI.([]byte)
|
||||
if !ok {
|
||||
t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI)
|
||||
}
|
||||
if string(result) != q.ExpectedVal {
|
||||
t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result)
|
||||
}
|
||||
}
|
||||
|
||||
y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC)
|
||||
y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC)
|
||||
EnableInfinityTs(y1500, y2500)
|
||||
|
||||
err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT)
|
||||
if err != nil {
|
||||
t.Errorf("Scanning infinity, expected no error, got %q", err)
|
||||
}
|
||||
if !resultT.Equal(y2500) {
|
||||
t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT)
|
||||
}
|
||||
|
||||
err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT)
|
||||
if err != nil {
|
||||
t.Errorf("Scanning infinity, expected no error, got %q", err)
|
||||
}
|
||||
if !resultT.Equal(y2500) {
|
||||
t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String())
|
||||
}
|
||||
|
||||
err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT)
|
||||
if err != nil {
|
||||
t.Errorf("Scanning -infinity, expected no error, got %q", err)
|
||||
}
|
||||
if !resultT.Equal(y1500) {
|
||||
t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
|
||||
}
|
||||
|
||||
err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT)
|
||||
if err != nil {
|
||||
t.Errorf("Scanning -infinity, expected no error, got %q", err)
|
||||
}
|
||||
if !resultT.Equal(y1500) {
|
||||
t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
|
||||
}
|
||||
|
||||
y_1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC)
|
||||
y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC)
|
||||
var s string
|
||||
err = db.QueryRow("SELECT $1::timestamp::text", y_1500).Scan(&s)
|
||||
if err != nil {
|
||||
t.Errorf("Encoding -infinity, expected no error, got %q", err)
|
||||
}
|
||||
if s != "-infinity" {
|
||||
t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
|
||||
}
|
||||
err = db.QueryRow("SELECT $1::timestamptz::text", y_1500).Scan(&s)
|
||||
if err != nil {
|
||||
t.Errorf("Encoding -infinity, expected no error, got %q", err)
|
||||
}
|
||||
if s != "-infinity" {
|
||||
t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
|
||||
}
|
||||
|
||||
err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s)
|
||||
if err != nil {
|
||||
t.Errorf("Encoding infinity, expected no error, got %q", err)
|
||||
}
|
||||
if s != "infinity" {
|
||||
t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
|
||||
}
|
||||
err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s)
|
||||
if err != nil {
|
||||
t.Errorf("Encoding infinity, expected no error, got %q", err)
|
||||
}
|
||||
if s != "infinity" {
|
||||
t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
|
||||
}
|
||||
|
||||
disableInfinityTs()
|
||||
|
||||
var panicErrorString string
|
||||
func() {
|
||||
defer func() {
|
||||
panicErrorString, _ = recover().(string)
|
||||
}()
|
||||
EnableInfinityTs(y2500, y1500)
|
||||
}()
|
||||
if panicErrorString != infinityTsNegativeMustBeSmaller {
|
||||
t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringWithNul(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
hello0world := string("hello\x00world")
|
||||
_, err := db.Query("SELECT $1::text", &hello0world)
|
||||
if err == nil {
|
||||
t.Fatal("Postgres accepts a string with nul in it; " +
|
||||
"injection attacks may be plausible")
|
||||
}
|
||||
}
|
||||
|
||||
func TestByteSliceToText(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
b := []byte("hello world")
|
||||
row := db.QueryRow("SELECT $1::text", b)
|
||||
|
||||
var result []byte
|
||||
err := row.Scan(&result)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if string(result) != string(b) {
|
||||
t.Fatalf("expected %v but got %v", b, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringToBytea(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
b := "hello world"
|
||||
row := db.QueryRow("SELECT $1::bytea", b)
|
||||
|
||||
var result []byte
|
||||
err := row.Scan(&result)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(result, []byte(b)) {
|
||||
t.Fatalf("expected %v but got %v", b, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTextByteSliceToUUID(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
|
||||
row := db.QueryRow("SELECT $1::uuid", b)
|
||||
|
||||
var result string
|
||||
err := row.Scan(&result)
|
||||
if forceBinaryParameters() {
|
||||
pqErr := err.(*Error)
|
||||
if pqErr == nil {
|
||||
t.Errorf("Expected to get error")
|
||||
} else if pqErr.Code != "22P03" {
|
||||
t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if result != string(b) {
|
||||
t.Fatalf("expected %v but got %v", b, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryByteSlicetoUUID(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
b := []byte{'\xa0', '\xee', '\xbc', '\x99',
|
||||
'\x9c', '\x0b',
|
||||
'\x4e', '\xf8',
|
||||
'\xbb', '\x00', '\x6b',
|
||||
'\xb9', '\xbd', '\x38', '\x0a', '\x11'}
|
||||
row := db.QueryRow("SELECT $1::uuid", b)
|
||||
|
||||
var result string
|
||||
err := row.Scan(&result)
|
||||
if forceBinaryParameters() {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") {
|
||||
t.Fatalf("expected %v but got %v", b, result)
|
||||
}
|
||||
} else {
|
||||
pqErr := err.(*Error)
|
||||
if pqErr == nil {
|
||||
t.Errorf("Expected to get error")
|
||||
} else if pqErr.Code != "22021" {
|
||||
t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringToUUID(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11"
|
||||
row := db.QueryRow("SELECT $1::uuid", s)
|
||||
|
||||
var result string
|
||||
err := row.Scan(&result)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if result != s {
|
||||
t.Fatalf("expected %v but got %v", s, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTextByteSliceToInt(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
expected := 12345678
|
||||
b := []byte(fmt.Sprintf("%d", expected))
|
||||
row := db.QueryRow("SELECT $1::int", b)
|
||||
|
||||
var result int
|
||||
err := row.Scan(&result)
|
||||
if forceBinaryParameters() {
|
||||
pqErr := err.(*Error)
|
||||
if pqErr == nil {
|
||||
t.Errorf("Expected to get error")
|
||||
} else if pqErr.Code != "22P03" {
|
||||
t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if result != expected {
|
||||
t.Fatalf("expected %v but got %v", expected, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryByteSliceToInt(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
expected := 12345678
|
||||
b := []byte{'\x00', '\xbc', '\x61', '\x4e'}
|
||||
row := db.QueryRow("SELECT $1::int", b)
|
||||
|
||||
var result int
|
||||
err := row.Scan(&result)
|
||||
if forceBinaryParameters() {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if result != expected {
|
||||
t.Fatalf("expected %v but got %v", expected, result)
|
||||
}
|
||||
} else {
|
||||
pqErr := err.(*Error)
|
||||
if pqErr == nil {
|
||||
t.Errorf("Expected to get error")
|
||||
} else if pqErr.Code != "22021" {
|
||||
t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
|
||||
}
|
||||
}
|
||||
}
|
||||
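The forceBinaryParameters branches above are toggled by the test harness. In application code the equivalent behavior is, assuming this vendored copy supports the binary_parameters connection-string option, selected at connection time; a hedged sketch with placeholder connection details:

```
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Placeholder connection string. With binary_parameters=yes the driver
	// sends []byte arguments in binary format, so a 4-byte big-endian slice
	// is a valid int4 parameter (cf. TestBinaryByteSliceToInt above); without
	// it the same slice is sent as text and rejected by the server.
	db, err := sql.Open("postgres",
		"dbname=postgres sslmode=disable binary_parameters=yes")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var n int
	// 0x00bc614e == 12345678, the value used in the test above.
	if err := db.QueryRow("SELECT $1::int", []byte{0x00, 0xbc, 0x61, 0x4e}).Scan(&n); err != nil {
		log.Fatal(err)
	}
	log.Println(n) // 12345678
}
```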
|
||||
func TestTextDecodeIntoString(t *testing.T) {
|
||||
input := []byte("hello world")
|
||||
want := string(input)
|
||||
for _, typ := range []oid.Oid{oid.T_char, oid.T_varchar, oid.T_text} {
|
||||
got := decode(¶meterStatus{}, input, typ, formatText)
|
||||
if got != want {
|
||||
t.Errorf("invalid string decoding output for %T(%+v), got %v but expected %v", typ, typ, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestByteaOutputFormatEncoding(t *testing.T) {
|
||||
input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123")
|
||||
want := []byte("\\x5c78000102fffe6162636465666730313233")
|
||||
got := encode(¶meterStatus{serverVersion: 90000}, input, oid.T_bytea)
|
||||
if !bytes.Equal(want, got) {
|
||||
t.Errorf("invalid hex bytea output, got %v but expected %v", got, want)
|
||||
}
|
||||
|
||||
want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123")
|
||||
got = encode(¶meterStatus{serverVersion: 84000}, input, oid.T_bytea)
|
||||
if !bytes.Equal(want, got) {
|
||||
t.Errorf("invalid escape bytea output, got %v but expected %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestByteaOutputFormats(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
if getServerVersion(t, db) < 90000 {
|
||||
// skip
|
||||
return
|
||||
}
|
||||
|
||||
testByteaOutputFormat := func(f string, usePrepared bool) {
|
||||
expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08")
|
||||
sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')"
|
||||
|
||||
var data []byte
|
||||
|
||||
// use a txn to avoid relying on getting the same connection
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer txn.Rollback()
|
||||
|
||||
_, err = txn.Exec("SET LOCAL bytea_output TO " + f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var rows *sql.Rows
|
||||
var stmt *sql.Stmt
|
||||
if usePrepared {
|
||||
stmt, err = txn.Prepare(sqlQuery)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rows, err = stmt.Query()
|
||||
} else {
|
||||
// use Query; QueryRow would hide the actual error
|
||||
rows, err = txn.Query(sqlQuery)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !rows.Next() {
|
||||
if rows.Err() != nil {
|
||||
t.Fatal(rows.Err())
|
||||
}
|
||||
t.Fatal("shouldn't happen")
|
||||
}
|
||||
err = rows.Scan(&data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = rows.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if stmt != nil {
|
||||
err = stmt.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(data, expectedData) {
|
||||
t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData)
|
||||
}
|
||||
}
|
||||
|
||||
testByteaOutputFormat("hex", false)
|
||||
testByteaOutputFormat("escape", false)
|
||||
testByteaOutputFormat("hex", true)
|
||||
testByteaOutputFormat("escape", true)
|
||||
}
|
||||
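As these tests demonstrate, the driver decodes both the hex (9.0+) and the legacy escape bytea_output formats transparently, so application code can simply scan bytea columns into []byte. A trivial sketch with hypothetical table and column names:

```
package pqexample

import "database/sql"

// readBlob fetches a bytea column into a []byte. The driver handles either
// server-side bytea_output setting, so no format handling is needed here.
// "blobs" and "payload" are hypothetical names.
func readBlob(db *sql.DB, id string) ([]byte, error) {
	var data []byte
	err := db.QueryRow("SELECT payload FROM blobs WHERE id = $1", id).Scan(&data)
	return data, err
}
```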
|
||||
func TestAppendEncodedText(t *testing.T) {
|
||||
var buf []byte
|
||||
|
||||
buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, int64(10))
|
||||
buf = append(buf, '\t')
|
||||
buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, 42.0000000001)
|
||||
buf = append(buf, '\t')
|
||||
buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, "hello\tworld")
|
||||
buf = append(buf, '\t')
|
||||
buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255})
|
||||
|
||||
if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" {
|
||||
t.Fatal(string(buf))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendEscapedText(t *testing.T) {
|
||||
if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" {
|
||||
t.Fatal(string(esc))
|
||||
}
|
||||
if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" {
|
||||
t.Fatal(string(esc))
|
||||
}
|
||||
if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" {
|
||||
t.Fatal(string(esc))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendEscapedTextExistingBuffer(t *testing.T) {
|
||||
var buf []byte
|
||||
buf = []byte("123\t")
|
||||
if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" {
|
||||
t.Fatal(string(esc))
|
||||
}
|
||||
buf = []byte("123\t")
|
||||
if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" {
|
||||
t.Fatal(string(esc))
|
||||
}
|
||||
buf = []byte("123\t")
|
||||
if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" {
|
||||
t.Fatal(string(esc))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendEscapedText(b *testing.B) {
|
||||
longString := ""
|
||||
for i := 0; i < 100; i++ {
|
||||
longString += "123456789\n"
|
||||
}
|
||||
for i := 0; i < b.N; i++ {
|
||||
appendEscapedText(nil, longString)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendEscapedTextNoEscape(b *testing.B) {
|
||||
longString := ""
|
||||
for i := 0; i < 100; i++ {
|
||||
longString += "1234567890"
|
||||
}
|
||||
for i := 0; i < b.N; i++ {
|
||||
appendEscapedText(nil, longString)
|
||||
}
|
||||
}
|
508
vendor/github.com/lib/pq/error.go
generated
vendored
Normal file
|
@@ -0,0 +1,508 @@
|
|||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Error severities
|
||||
const (
|
||||
Efatal = "FATAL"
|
||||
Epanic = "PANIC"
|
||||
Ewarning = "WARNING"
|
||||
Enotice = "NOTICE"
|
||||
Edebug = "DEBUG"
|
||||
Einfo = "INFO"
|
||||
Elog = "LOG"
|
||||
)
|
||||
|
||||
// Error represents an error communicating with the server.
|
||||
//
|
||||
// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
|
||||
type Error struct {
|
||||
Severity string
|
||||
Code ErrorCode
|
||||
Message string
|
||||
Detail string
|
||||
Hint string
|
||||
Position string
|
||||
InternalPosition string
|
||||
InternalQuery string
|
||||
Where string
|
||||
Schema string
|
||||
Table string
|
||||
Column string
|
||||
DataTypeName string
|
||||
Constraint string
|
||||
File string
|
||||
Line string
|
||||
Routine string
|
||||
}
|
||||
|
||||
// ErrorCode is a five-character error code.
|
||||
type ErrorCode string
|
||||
|
||||
// Name returns a more human friendly rendering of the error code, namely the
|
||||
// "condition name".
|
||||
//
|
||||
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
|
||||
// details.
|
||||
func (ec ErrorCode) Name() string {
|
||||
return errorCodeNames[ec]
|
||||
}
|
||||
|
||||
// ErrorClass is only the class part of an error code.
|
||||
type ErrorClass string
|
||||
|
||||
// Name returns the condition name of an error class. It is equivalent to the
|
||||
// condition name of the "standard" error code (i.e. the one having the last
|
||||
// three characters "000").
|
||||
func (ec ErrorClass) Name() string {
|
||||
return errorCodeNames[ErrorCode(ec+"000")]
|
||||
}
|
||||
|
||||
// Class returns the error class, e.g. "28".
|
||||
//
|
||||
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
|
||||
// details.
|
||||
func (ec ErrorCode) Class() ErrorClass {
|
||||
return ErrorClass(ec[0:2])
|
||||
}
|
||||
|
||||
// errorCodeNames is a mapping between the five-character error codes and the
|
||||
// human readable "condition names". It is derived from the list at
|
||||
// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
|
||||
var errorCodeNames = map[ErrorCode]string{
|
||||
// Class 00 - Successful Completion
|
||||
"00000": "successful_completion",
|
||||
// Class 01 - Warning
|
||||
"01000": "warning",
|
||||
"0100C": "dynamic_result_sets_returned",
|
||||
"01008": "implicit_zero_bit_padding",
|
||||
"01003": "null_value_eliminated_in_set_function",
|
||||
"01007": "privilege_not_granted",
|
||||
"01006": "privilege_not_revoked",
|
||||
"01004": "string_data_right_truncation",
|
||||
"01P01": "deprecated_feature",
|
||||
// Class 02 - No Data (this is also a warning class per the SQL standard)
|
||||
"02000": "no_data",
|
||||
"02001": "no_additional_dynamic_result_sets_returned",
|
||||
// Class 03 - SQL Statement Not Yet Complete
|
||||
"03000": "sql_statement_not_yet_complete",
|
||||
// Class 08 - Connection Exception
|
||||
"08000": "connection_exception",
|
||||
"08003": "connection_does_not_exist",
|
||||
"08006": "connection_failure",
|
||||
"08001": "sqlclient_unable_to_establish_sqlconnection",
|
||||
"08004": "sqlserver_rejected_establishment_of_sqlconnection",
|
||||
"08007": "transaction_resolution_unknown",
|
||||
"08P01": "protocol_violation",
|
||||
// Class 09 - Triggered Action Exception
|
||||
"09000": "triggered_action_exception",
|
||||
// Class 0A - Feature Not Supported
|
||||
"0A000": "feature_not_supported",
|
||||
// Class 0B - Invalid Transaction Initiation
|
||||
"0B000": "invalid_transaction_initiation",
|
||||
// Class 0F - Locator Exception
|
||||
"0F000": "locator_exception",
|
||||
"0F001": "invalid_locator_specification",
|
||||
// Class 0L - Invalid Grantor
|
||||
"0L000": "invalid_grantor",
|
||||
"0LP01": "invalid_grant_operation",
|
||||
// Class 0P - Invalid Role Specification
|
||||
"0P000": "invalid_role_specification",
|
||||
// Class 0Z - Diagnostics Exception
|
||||
"0Z000": "diagnostics_exception",
|
||||
"0Z002": "stacked_diagnostics_accessed_without_active_handler",
|
||||
// Class 20 - Case Not Found
|
||||
"20000": "case_not_found",
|
||||
// Class 21 - Cardinality Violation
|
||||
"21000": "cardinality_violation",
|
||||
// Class 22 - Data Exception
|
||||
"22000": "data_exception",
|
||||
"2202E": "array_subscript_error",
|
||||
"22021": "character_not_in_repertoire",
|
||||
"22008": "datetime_field_overflow",
|
||||
"22012": "division_by_zero",
|
||||
"22005": "error_in_assignment",
|
||||
"2200B": "escape_character_conflict",
|
||||
"22022": "indicator_overflow",
|
||||
"22015": "interval_field_overflow",
|
||||
"2201E": "invalid_argument_for_logarithm",
|
||||
"22014": "invalid_argument_for_ntile_function",
|
||||
"22016": "invalid_argument_for_nth_value_function",
|
||||
"2201F": "invalid_argument_for_power_function",
|
||||
"2201G": "invalid_argument_for_width_bucket_function",
|
||||
"22018": "invalid_character_value_for_cast",
|
||||
"22007": "invalid_datetime_format",
|
||||
"22019": "invalid_escape_character",
|
||||
"2200D": "invalid_escape_octet",
|
||||
"22025": "invalid_escape_sequence",
|
||||
"22P06": "nonstandard_use_of_escape_character",
|
||||
"22010": "invalid_indicator_parameter_value",
|
||||
"22023": "invalid_parameter_value",
|
||||
"2201B": "invalid_regular_expression",
|
||||
"2201W": "invalid_row_count_in_limit_clause",
|
||||
"2201X": "invalid_row_count_in_result_offset_clause",
|
||||
"22009": "invalid_time_zone_displacement_value",
|
||||
"2200C": "invalid_use_of_escape_character",
|
||||
"2200G": "most_specific_type_mismatch",
|
||||
"22004": "null_value_not_allowed",
|
||||
"22002": "null_value_no_indicator_parameter",
|
||||
"22003": "numeric_value_out_of_range",
|
||||
"22026": "string_data_length_mismatch",
|
||||
"22001": "string_data_right_truncation",
|
||||
"22011": "substring_error",
|
||||
"22027": "trim_error",
|
||||
"22024": "unterminated_c_string",
|
||||
"2200F": "zero_length_character_string",
|
||||
"22P01": "floating_point_exception",
|
||||
"22P02": "invalid_text_representation",
|
||||
"22P03": "invalid_binary_representation",
|
||||
"22P04": "bad_copy_file_format",
|
||||
"22P05": "untranslatable_character",
|
||||
"2200L": "not_an_xml_document",
|
||||
"2200M": "invalid_xml_document",
|
||||
"2200N": "invalid_xml_content",
|
||||
"2200S": "invalid_xml_comment",
|
||||
"2200T": "invalid_xml_processing_instruction",
|
||||
// Class 23 - Integrity Constraint Violation
|
||||
"23000": "integrity_constraint_violation",
|
||||
"23001": "restrict_violation",
|
||||
"23502": "not_null_violation",
|
||||
"23503": "foreign_key_violation",
|
||||
"23505": "unique_violation",
|
||||
"23514": "check_violation",
|
||||
"23P01": "exclusion_violation",
|
||||
// Class 24 - Invalid Cursor State
|
||||
"24000": "invalid_cursor_state",
|
||||
// Class 25 - Invalid Transaction State
|
||||
"25000": "invalid_transaction_state",
|
||||
"25001": "active_sql_transaction",
|
||||
"25002": "branch_transaction_already_active",
|
||||
"25008": "held_cursor_requires_same_isolation_level",
|
||||
"25003": "inappropriate_access_mode_for_branch_transaction",
|
||||
"25004": "inappropriate_isolation_level_for_branch_transaction",
|
||||
"25005": "no_active_sql_transaction_for_branch_transaction",
|
||||
"25006": "read_only_sql_transaction",
|
||||
"25007": "schema_and_data_statement_mixing_not_supported",
|
||||
"25P01": "no_active_sql_transaction",
|
||||
"25P02": "in_failed_sql_transaction",
|
||||
// Class 26 - Invalid SQL Statement Name
|
||||
"26000": "invalid_sql_statement_name",
|
||||
// Class 27 - Triggered Data Change Violation
|
||||
"27000": "triggered_data_change_violation",
|
||||
// Class 28 - Invalid Authorization Specification
|
||||
"28000": "invalid_authorization_specification",
|
||||
"28P01": "invalid_password",
|
||||
// Class 2B - Dependent Privilege Descriptors Still Exist
|
||||
"2B000": "dependent_privilege_descriptors_still_exist",
|
||||
"2BP01": "dependent_objects_still_exist",
|
||||
// Class 2D - Invalid Transaction Termination
|
||||
"2D000": "invalid_transaction_termination",
|
||||
// Class 2F - SQL Routine Exception
|
||||
"2F000": "sql_routine_exception",
|
||||
"2F005": "function_executed_no_return_statement",
|
||||
"2F002": "modifying_sql_data_not_permitted",
|
||||
"2F003": "prohibited_sql_statement_attempted",
|
||||
"2F004": "reading_sql_data_not_permitted",
|
||||
// Class 34 - Invalid Cursor Name
|
||||
"34000": "invalid_cursor_name",
|
||||
// Class 38 - External Routine Exception
|
||||
"38000": "external_routine_exception",
|
||||
"38001": "containing_sql_not_permitted",
|
||||
"38002": "modifying_sql_data_not_permitted",
|
||||
"38003": "prohibited_sql_statement_attempted",
|
||||
"38004": "reading_sql_data_not_permitted",
|
||||
// Class 39 - External Routine Invocation Exception
|
||||
"39000": "external_routine_invocation_exception",
|
||||
"39001": "invalid_sqlstate_returned",
|
||||
"39004": "null_value_not_allowed",
|
||||
"39P01": "trigger_protocol_violated",
|
||||
"39P02": "srf_protocol_violated",
|
||||
// Class 3B - Savepoint Exception
|
||||
"3B000": "savepoint_exception",
|
||||
"3B001": "invalid_savepoint_specification",
|
||||
// Class 3D - Invalid Catalog Name
|
||||
"3D000": "invalid_catalog_name",
|
||||
// Class 3F - Invalid Schema Name
|
||||
"3F000": "invalid_schema_name",
|
||||
// Class 40 - Transaction Rollback
|
||||
"40000": "transaction_rollback",
|
||||
"40002": "transaction_integrity_constraint_violation",
|
||||
"40001": "serialization_failure",
|
||||
"40003": "statement_completion_unknown",
|
||||
"40P01": "deadlock_detected",
|
||||
// Class 42 - Syntax Error or Access Rule Violation
|
||||
"42000": "syntax_error_or_access_rule_violation",
|
||||
"42601": "syntax_error",
|
||||
"42501": "insufficient_privilege",
|
||||
"42846": "cannot_coerce",
|
||||
"42803": "grouping_error",
|
||||
"42P20": "windowing_error",
|
||||
"42P19": "invalid_recursion",
|
||||
"42830": "invalid_foreign_key",
|
||||
"42602": "invalid_name",
|
||||
"42622": "name_too_long",
|
||||
"42939": "reserved_name",
|
||||
"42804": "datatype_mismatch",
|
||||
"42P18": "indeterminate_datatype",
|
||||
"42P21": "collation_mismatch",
|
||||
"42P22": "indeterminate_collation",
|
||||
"42809": "wrong_object_type",
|
||||
"42703": "undefined_column",
|
||||
"42883": "undefined_function",
|
||||
"42P01": "undefined_table",
|
||||
"42P02": "undefined_parameter",
|
||||
"42704": "undefined_object",
|
||||
"42701": "duplicate_column",
|
||||
"42P03": "duplicate_cursor",
|
||||
"42P04": "duplicate_database",
|
||||
"42723": "duplicate_function",
|
||||
"42P05": "duplicate_prepared_statement",
|
||||
"42P06": "duplicate_schema",
|
||||
"42P07": "duplicate_table",
|
||||
"42712": "duplicate_alias",
|
||||
"42710": "duplicate_object",
|
||||
"42702": "ambiguous_column",
|
||||
"42725": "ambiguous_function",
|
||||
"42P08": "ambiguous_parameter",
|
||||
"42P09": "ambiguous_alias",
|
||||
"42P10": "invalid_column_reference",
|
||||
"42611": "invalid_column_definition",
|
||||
"42P11": "invalid_cursor_definition",
|
||||
"42P12": "invalid_database_definition",
|
||||
"42P13": "invalid_function_definition",
|
||||
"42P14": "invalid_prepared_statement_definition",
|
||||
"42P15": "invalid_schema_definition",
|
||||
"42P16": "invalid_table_definition",
|
||||
"42P17": "invalid_object_definition",
|
||||
// Class 44 - WITH CHECK OPTION Violation
|
||||
"44000": "with_check_option_violation",
|
||||
// Class 53 - Insufficient Resources
|
||||
"53000": "insufficient_resources",
|
||||
"53100": "disk_full",
|
||||
"53200": "out_of_memory",
|
||||
"53300": "too_many_connections",
|
||||
"53400": "configuration_limit_exceeded",
|
||||
// Class 54 - Program Limit Exceeded
|
||||
"54000": "program_limit_exceeded",
|
||||
"54001": "statement_too_complex",
|
||||
"54011": "too_many_columns",
|
||||
"54023": "too_many_arguments",
|
||||
// Class 55 - Object Not In Prerequisite State
|
||||
"55000": "object_not_in_prerequisite_state",
|
||||
"55006": "object_in_use",
|
||||
"55P02": "cant_change_runtime_param",
|
||||
"55P03": "lock_not_available",
|
||||
// Class 57 - Operator Intervention
|
||||
"57000": "operator_intervention",
|
||||
"57014": "query_canceled",
|
||||
"57P01": "admin_shutdown",
|
||||
"57P02": "crash_shutdown",
|
||||
"57P03": "cannot_connect_now",
|
||||
"57P04": "database_dropped",
|
||||
// Class 58 - System Error (errors external to PostgreSQL itself)
|
||||
"58000": "system_error",
|
||||
"58030": "io_error",
|
||||
"58P01": "undefined_file",
|
||||
"58P02": "duplicate_file",
|
||||
// Class F0 - Configuration File Error
|
||||
"F0000": "config_file_error",
|
||||
"F0001": "lock_file_exists",
|
||||
// Class HV - Foreign Data Wrapper Error (SQL/MED)
|
||||
"HV000": "fdw_error",
|
||||
"HV005": "fdw_column_name_not_found",
|
||||
"HV002": "fdw_dynamic_parameter_value_needed",
|
||||
"HV010": "fdw_function_sequence_error",
|
||||
"HV021": "fdw_inconsistent_descriptor_information",
|
||||
"HV024": "fdw_invalid_attribute_value",
|
||||
"HV007": "fdw_invalid_column_name",
|
||||
"HV008": "fdw_invalid_column_number",
|
||||
"HV004": "fdw_invalid_data_type",
|
||||
"HV006": "fdw_invalid_data_type_descriptors",
|
||||
"HV091": "fdw_invalid_descriptor_field_identifier",
|
||||
"HV00B": "fdw_invalid_handle",
|
||||
"HV00C": "fdw_invalid_option_index",
|
||||
"HV00D": "fdw_invalid_option_name",
|
||||
"HV090": "fdw_invalid_string_length_or_buffer_length",
|
||||
"HV00A": "fdw_invalid_string_format",
|
||||
"HV009": "fdw_invalid_use_of_null_pointer",
|
||||
"HV014": "fdw_too_many_handles",
|
||||
"HV001": "fdw_out_of_memory",
|
||||
"HV00P": "fdw_no_schemas",
|
||||
"HV00J": "fdw_option_name_not_found",
|
||||
"HV00K": "fdw_reply_handle",
|
||||
"HV00Q": "fdw_schema_not_found",
|
||||
"HV00R": "fdw_table_not_found",
|
||||
"HV00L": "fdw_unable_to_create_execution",
|
||||
"HV00M": "fdw_unable_to_create_reply",
|
||||
"HV00N": "fdw_unable_to_establish_connection",
|
||||
// Class P0 - PL/pgSQL Error
|
||||
"P0000": "plpgsql_error",
|
||||
"P0001": "raise_exception",
|
||||
"P0002": "no_data_found",
|
||||
"P0003": "too_many_rows",
|
||||
// Class XX - Internal Error
|
||||
"XX000": "internal_error",
|
||||
"XX001": "data_corrupted",
|
||||
"XX002": "index_corrupted",
|
||||
}
|
||||
|
||||
func parseError(r *readBuf) *Error {
|
||||
err := new(Error)
|
||||
for t := r.byte(); t != 0; t = r.byte() {
|
||||
msg := r.string()
|
||||
switch t {
|
||||
case 'S':
|
||||
err.Severity = msg
|
||||
case 'C':
|
||||
err.Code = ErrorCode(msg)
|
||||
case 'M':
|
||||
err.Message = msg
|
||||
case 'D':
|
||||
err.Detail = msg
|
||||
case 'H':
|
||||
err.Hint = msg
|
||||
case 'P':
|
||||
err.Position = msg
|
||||
case 'p':
|
||||
err.InternalPosition = msg
|
||||
case 'q':
|
||||
err.InternalQuery = msg
|
||||
case 'W':
|
||||
err.Where = msg
|
||||
case 's':
|
||||
err.Schema = msg
|
||||
case 't':
|
||||
err.Table = msg
|
||||
case 'c':
|
||||
err.Column = msg
|
||||
case 'd':
|
||||
err.DataTypeName = msg
|
||||
case 'n':
|
||||
err.Constraint = msg
|
||||
case 'F':
|
||||
err.File = msg
|
||||
case 'L':
|
||||
err.Line = msg
|
||||
case 'R':
|
||||
err.Routine = msg
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Fatal returns true if the Error Severity is fatal.
|
||||
func (err *Error) Fatal() bool {
|
||||
return err.Severity == Efatal
|
||||
}
|
||||
|
||||
// Get implements the legacy PGError interface. New code should use the fields
|
||||
// of the Error struct directly.
|
||||
func (err *Error) Get(k byte) (v string) {
|
||||
switch k {
|
||||
case 'S':
|
||||
return err.Severity
|
||||
case 'C':
|
||||
return string(err.Code)
|
||||
case 'M':
|
||||
return err.Message
|
||||
case 'D':
|
||||
return err.Detail
|
||||
case 'H':
|
||||
return err.Hint
|
||||
case 'P':
|
||||
return err.Position
|
||||
case 'p':
|
||||
return err.InternalPosition
|
||||
case 'q':
|
||||
return err.InternalQuery
|
||||
case 'W':
|
||||
return err.Where
|
||||
case 's':
|
||||
return err.Schema
|
||||
case 't':
|
||||
return err.Table
|
||||
case 'c':
|
||||
return err.Column
|
||||
case 'd':
|
||||
return err.DataTypeName
|
||||
case 'n':
|
||||
return err.Constraint
|
||||
case 'F':
|
||||
return err.File
|
||||
case 'L':
|
||||
return err.Line
|
||||
case 'R':
|
||||
return err.Routine
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (err Error) Error() string {
|
||||
return "pq: " + err.Message
|
||||
}
|
||||
|
||||
// PGError is an interface used by previous versions of pq. It is provided
|
||||
// only to support legacy code. New code should use the Error type.
|
||||
type PGError interface {
|
||||
Error() string
|
||||
Fatal() bool
|
||||
Get(k byte) (v string)
|
||||
}
|
||||
|
||||
func errorf(s string, args ...interface{}) {
|
||||
panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
|
||||
}
|
||||
|
||||
func errRecoverNoErrBadConn(err *error) {
|
||||
e := recover()
|
||||
if e == nil {
|
||||
// Do nothing
|
||||
return
|
||||
}
|
||||
var ok bool
|
||||
*err, ok = e.(error)
|
||||
if !ok {
|
||||
*err = fmt.Errorf("pq: unexpected error: %#v", e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *conn) errRecover(err *error) {
|
||||
e := recover()
|
||||
switch v := e.(type) {
|
||||
case nil:
|
||||
// Do nothing
|
||||
case runtime.Error:
|
||||
c.bad = true
|
||||
panic(v)
|
||||
case *Error:
|
||||
if v.Fatal() {
|
||||
*err = driver.ErrBadConn
|
||||
} else {
|
||||
*err = v
|
||||
}
|
||||
case *net.OpError:
|
||||
*err = driver.ErrBadConn
|
||||
case error:
|
||||
if v == io.EOF || v.(error).Error() == "remote error: handshake failure" {
|
||||
*err = driver.ErrBadConn
|
||||
} else {
|
||||
*err = v
|
||||
}
|
||||
|
||||
default:
|
||||
c.bad = true
|
||||
panic(fmt.Sprintf("unknown error: %#v", e))
|
||||
}
|
||||
|
||||
// Any time we return ErrBadConn, we need to remember it since *Tx doesn't
|
||||
// mark the connection bad in database/sql.
|
||||
if *err == driver.ErrBadConn {
|
||||
c.bad = true
|
||||
}
|
||||
}
|
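A short sketch of how calling code typically consumes this error type: type-assert to *pq.Error and branch on the SQLSTATE. The helper and table names below are hypothetical; only *pq.Error, ErrorCode, and Name come from the file above:

```
package pqexample

import (
	"database/sql"

	"github.com/lib/pq"
)

// errCode extracts the five-character SQLSTATE from a query error, or ""
// if the error did not originate from the server.
func errCode(err error) pq.ErrorCode {
	if pqErr, ok := err.(*pq.Error); ok {
		return pqErr.Code
	}
	return ""
}

// insertRow reports whether the row was created, treating a unique_violation
// (SQLSTATE 23505) as "already exists" rather than a failure. The table name
// is a placeholder.
func insertRow(db *sql.DB, id string) (created bool, err error) {
	_, err = db.Exec("INSERT INTO example_table (id) VALUES ($1)", id)
	switch {
	case err == nil:
		return true, nil
	case errCode(err).Name() == "unique_violation":
		return false, nil
	default:
		return false, err
	}
}
```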
118
vendor/github.com/lib/pq/hstore/hstore.go
generated
vendored
Normal file
|
@@ -0,0 +1,118 @@
|
|||
package hstore
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A wrapper for transferring Hstore values back and forth easily.
|
||||
type Hstore struct {
|
||||
Map map[string]sql.NullString
|
||||
}
|
||||
|
||||
// escapes and quotes hstore keys/values
|
||||
// s should be a sql.NullString or string
|
||||
func hQuote(s interface{}) string {
|
||||
var str string
|
||||
switch v := s.(type) {
|
||||
case sql.NullString:
|
||||
if !v.Valid {
|
||||
return "NULL"
|
||||
}
|
||||
str = v.String
|
||||
case string:
|
||||
str = v
|
||||
default:
|
||||
panic("not a string or sql.NullString")
|
||||
}
|
||||
|
||||
str = strings.Replace(str, "\\", "\\\\", -1)
|
||||
return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"`
|
||||
}
|
||||
|
||||
// Scan implements the Scanner interface.
|
||||
//
|
||||
// Note h.Map is reallocated before the scan to clear existing values. If the
|
||||
// hstore column's database value is NULL, then h.Map is set to nil instead.
|
||||
func (h *Hstore) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
h.Map = nil
|
||||
return nil
|
||||
}
|
||||
h.Map = make(map[string]sql.NullString)
|
||||
var b byte
|
||||
pair := [][]byte{{}, {}}
|
||||
pi := 0
|
||||
inQuote := false
|
||||
didQuote := false
|
||||
sawSlash := false
|
||||
bindex := 0
|
||||
for bindex, b = range value.([]byte) {
|
||||
if sawSlash {
|
||||
pair[pi] = append(pair[pi], b)
|
||||
sawSlash = false
|
||||
continue
|
||||
}
|
||||
|
||||
switch b {
|
||||
case '\\':
|
||||
sawSlash = true
|
||||
continue
|
||||
case '"':
|
||||
inQuote = !inQuote
|
||||
if !didQuote {
|
||||
didQuote = true
|
||||
}
|
||||
continue
|
||||
default:
|
||||
if !inQuote {
|
||||
switch b {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
continue
|
||||
case '=':
|
||||
continue
|
||||
case '>':
|
||||
pi = 1
|
||||
didQuote = false
|
||||
continue
|
||||
case ',':
|
||||
s := string(pair[1])
|
||||
if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
|
||||
h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
|
||||
} else {
|
||||
h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
|
||||
}
|
||||
pair[0] = []byte{}
|
||||
pair[1] = []byte{}
|
||||
pi = 0
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
pair[pi] = append(pair[pi], b)
|
||||
}
|
||||
if bindex > 0 {
|
||||
s := string(pair[1])
|
||||
if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
|
||||
h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
|
||||
} else {
|
||||
h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface. Note if h.Map is nil, the
|
||||
// database column value will be set to NULL.
|
||||
func (h Hstore) Value() (driver.Value, error) {
|
||||
if h.Map == nil {
|
||||
return nil, nil
|
||||
}
|
||||
parts := []string{}
|
||||
for key, val := range h.Map {
|
||||
thispart := hQuote(key) + "=>" + hQuote(val)
|
||||
parts = append(parts, thispart)
|
||||
}
|
||||
return []byte(strings.Join(parts, ",")), nil
|
||||
}
|
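A minimal round-trip sketch for the wrapper above. The items table, attrs column, and helper name are hypothetical, and the hstore extension is assumed to already be installed (CREATE EXTENSION hstore):

```
package pqexample

import (
	"database/sql"

	"github.com/lib/pq/hstore"
)

// saveAttrs writes a map into an hstore column and reads it back.
func saveAttrs(db *sql.DB, id string, attrs map[string]string) (map[string]string, error) {
	in := hstore.Hstore{Map: make(map[string]sql.NullString, len(attrs))}
	for k, v := range attrs {
		in.Map[k] = sql.NullString{String: v, Valid: true}
	}

	// Hstore implements driver.Valuer for parameters and sql.Scanner for results.
	var out hstore.Hstore
	err := db.QueryRow(
		"INSERT INTO items (id, attrs) VALUES ($1, $2) RETURNING attrs",
		id, in,
	).Scan(&out)
	if err != nil {
		return nil, err
	}

	result := make(map[string]string, len(out.Map))
	for k, v := range out.Map {
		if v.Valid {
			result[k] = v.String
		}
	}
	return result, nil
}
```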
148
vendor/github.com/lib/pq/hstore/hstore_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,148 @@
|
|||
package hstore
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
type Fatalistic interface {
|
||||
Fatal(args ...interface{})
|
||||
}
|
||||
|
||||
func openTestConn(t Fatalistic) *sql.DB {
|
||||
datname := os.Getenv("PGDATABASE")
|
||||
sslmode := os.Getenv("PGSSLMODE")
|
||||
|
||||
if datname == "" {
|
||||
os.Setenv("PGDATABASE", "pqgotest")
|
||||
}
|
||||
|
||||
if sslmode == "" {
|
||||
os.Setenv("PGSSLMODE", "disable")
|
||||
}
|
||||
|
||||
conn, err := sql.Open("postgres", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return conn
|
||||
}
|
||||
|
||||
func TestHstore(t *testing.T) {
|
||||
db := openTestConn(t)
|
||||
defer db.Close()
|
||||
|
||||
// quietly create hstore if it doesn't exist
|
||||
_, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
|
||||
if err != nil {
|
||||
t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
|
||||
}
|
||||
|
||||
hs := Hstore{}
|
||||
|
||||
// test for null-valued hstores
|
||||
err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hs.Map != nil {
|
||||
t.Fatalf("expected null map")
|
||||
}
|
||||
|
||||
err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
|
||||
if err != nil {
|
||||
t.Fatalf("re-query null map failed: %s", err.Error())
|
||||
}
|
||||
if hs.Map != nil {
|
||||
t.Fatalf("expected null map")
|
||||
}
|
||||
|
||||
// test for empty hstores
|
||||
err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hs.Map == nil {
|
||||
t.Fatalf("expected empty map, got null map")
|
||||
}
|
||||
if len(hs.Map) != 0 {
|
||||
t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
|
||||
}
|
||||
|
||||
err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
|
||||
if err != nil {
|
||||
t.Fatalf("re-query empty map failed: %s", err.Error())
|
||||
}
|
||||
if hs.Map == nil {
|
||||
t.Fatalf("expected empty map, got null map")
|
||||
}
|
||||
if len(hs.Map) != 0 {
|
||||
t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
|
||||
}
|
||||
|
||||
// a few example maps to test out
|
||||
hsOnePair := Hstore{
|
||||
Map: map[string]sql.NullString{
|
||||
"key1": {String: "value1", Valid: true},
|
||||
},
|
||||
}
|
||||
|
||||
hsThreePairs := Hstore{
|
||||
Map: map[string]sql.NullString{
|
||||
"key1": {String: "value1", Valid: true},
|
||||
"key2": {String: "value2", Valid: true},
|
||||
"key3": {String: "value3", Valid: true},
|
||||
},
|
||||
}
|
||||
|
||||
hsSmorgasbord := Hstore{
|
||||
Map: map[string]sql.NullString{
|
||||
"nullstring": {String: "NULL", Valid: true},
|
||||
"actuallynull": {String: "", Valid: false},
|
||||
"NULL": {String: "NULL string key", Valid: true},
|
||||
"withbracket": {String: "value>42", Valid: true},
|
||||
"withequal": {String: "value=42", Valid: true},
|
||||
`"withquotes1"`: {String: `this "should" be fine`, Valid: true},
|
||||
`"withquotes"2"`: {String: `this "should\" also be fine`, Valid: true},
|
||||
"embedded1": {String: "value1=>x1", Valid: true},
|
||||
"embedded2": {String: `"value2"=>x2`, Valid: true},
|
||||
"withnewlines": {String: "\n\nvalue\t=>2", Valid: true},
|
||||
"<<all sorts of crazy>>": {String: `this, "should,\" also, => be fine`, Valid: true},
|
||||
},
|
||||
}
|
||||
|
||||
// test encoding in query params, then decoding during Scan
|
||||
testBidirectional := func(h Hstore) {
|
||||
err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
|
||||
if err != nil {
|
||||
t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error())
|
||||
}
|
||||
if hs.Map == nil {
|
||||
t.Fatalf("expected %d-pair map, got null map", len(h.Map))
|
||||
}
|
||||
if len(hs.Map) != len(h.Map) {
|
||||
t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map))
|
||||
}
|
||||
|
||||
for key, val := range hs.Map {
|
||||
otherval, found := h.Map[key]
|
||||
if !found {
|
||||
t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map))
|
||||
}
|
||||
if otherval.Valid != val.Valid {
|
||||
t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map))
|
||||
}
|
||||
if otherval.String != val.String {
|
||||
t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testBidirectional(hsOnePair)
|
||||
testBidirectional(hsThreePairs)
|
||||
testBidirectional(hsSmorgasbord)
|
||||
}
|
102
vendor/github.com/lib/pq/listen_example/doc.go
generated
vendored
Normal file
|
@@ -0,0 +1,102 @@
|
|||
/*
|
||||
|
||||
Below you will find a self-contained Go program which uses the LISTEN / NOTIFY
|
||||
mechanism to avoid polling the database while waiting for more work to arrive.
|
||||
|
||||
//
|
||||
// You can see the program in action by defining a function similar to
|
||||
// the following:
|
||||
//
|
||||
// CREATE OR REPLACE FUNCTION public.get_work()
|
||||
// RETURNS bigint
|
||||
// LANGUAGE sql
|
||||
// AS $$
|
||||
// SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END
|
||||
// $$
|
||||
// ;
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
func doWork(db *sql.DB, work int64) {
|
||||
// work here
|
||||
}
|
||||
|
||||
func getWork(db *sql.DB) {
|
||||
for {
|
||||
// get work from the database here
|
||||
var work sql.NullInt64
|
||||
err := db.QueryRow("SELECT get_work()").Scan(&work)
|
||||
if err != nil {
|
||||
fmt.Println("call to get_work() failed: ", err)
|
||||
time.Sleep(10 * time.Second)
|
||||
continue
|
||||
}
|
||||
if !work.Valid {
|
||||
// no more work to do
|
||||
fmt.Println("ran out of work")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("starting work on ", work.Int64)
|
||||
go doWork(db, work.Int64)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForNotification(l *pq.Listener) {
|
||||
for {
|
||||
select {
|
||||
case <-l.Notify:
|
||||
fmt.Println("received notification, new work available")
|
||||
return
|
||||
case <-time.After(90 * time.Second):
|
||||
go func() {
|
||||
l.Ping()
|
||||
}()
|
||||
// Check if there's more work available, just in case it takes
|
||||
// a while for the Listener to notice connection loss and
|
||||
// reconnect.
|
||||
fmt.Println("received no work for 90 seconds, checking for new work")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
var conninfo string = ""
|
||||
|
||||
db, err := sql.Open("postgres", conninfo)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
reportProblem := func(ev pq.ListenerEventType, err error) {
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
listener := pq.NewListener(conninfo, 10 * time.Second, time.Minute, reportProblem)
|
||||
err = listener.Listen("getwork")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Println("entering main loop")
|
||||
for {
|
||||
// process all available work before waiting for notifications
|
||||
getWork(db)
|
||||
waitForNotification(listener)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
*/
|
||||
package listen_example
|
782
vendor/github.com/lib/pq/notify.go
generated
vendored
Normal file
|
@@ -0,0 +1,782 @@
|
|||
package pq
|
||||
|
||||
// Package pq is a pure Go Postgres driver for the database/sql package.
|
||||
// This module contains support for Postgres LISTEN/NOTIFY.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Notification represents a single notification from the database.
|
||||
type Notification struct {
|
||||
// Process ID (PID) of the notifying postgres backend.
|
||||
BePid int
|
||||
// Name of the channel the notification was sent on.
|
||||
Channel string
|
||||
// Payload, or the empty string if unspecified.
|
||||
Extra string
|
||||
}
|
||||
|
||||
func recvNotification(r *readBuf) *Notification {
|
||||
bePid := r.int32()
|
||||
channel := r.string()
|
||||
extra := r.string()
|
||||
|
||||
return &Notification{bePid, channel, extra}
|
||||
}
|
||||
|
||||
const (
|
||||
connStateIdle int32 = iota
|
||||
connStateExpectResponse
|
||||
connStateExpectReadyForQuery
|
||||
)
|
||||
|
||||
type message struct {
|
||||
typ byte
|
||||
err error
|
||||
}
|
||||
|
||||
var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
|
||||
|
||||
// ListenerConn is a low-level interface for waiting for notifications. You
|
||||
// should use Listener instead.
|
||||
type ListenerConn struct {
|
||||
// guards cn and err
|
||||
connectionLock sync.Mutex
|
||||
cn *conn
|
||||
err error
|
||||
|
||||
connState int32
|
||||
|
||||
// the sending goroutine will be holding this lock
|
||||
senderLock sync.Mutex
|
||||
|
||||
notificationChan chan<- *Notification
|
||||
|
||||
replyChan chan message
|
||||
}
|
||||
|
||||
// Creates a new ListenerConn. Use NewListener instead.
|
||||
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
|
||||
return newDialListenerConn(defaultDialer{}, name, notificationChan)
|
||||
}
|
||||
|
||||
func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
|
||||
cn, err := DialOpen(d, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l := &ListenerConn{
|
||||
cn: cn.(*conn),
|
||||
notificationChan: c,
|
||||
connState: connStateIdle,
|
||||
replyChan: make(chan message, 2),
|
||||
}
|
||||
|
||||
go l.listenerConnMain()
|
||||
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// We can only allow one goroutine at a time to be running a query on the
|
||||
// connection for various reasons, so the goroutine sending on the connection
|
||||
// must be holding senderLock.
|
||||
//
|
||||
// Returns an error if an unrecoverable error has occurred and the ListenerConn
|
||||
// should be abandoned.
|
||||
func (l *ListenerConn) acquireSenderLock() error {
|
||||
// we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
|
||||
l.senderLock.Lock()
|
||||
|
||||
l.connectionLock.Lock()
|
||||
err := l.err
|
||||
l.connectionLock.Unlock()
|
||||
if err != nil {
|
||||
l.senderLock.Unlock()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *ListenerConn) releaseSenderLock() {
|
||||
l.senderLock.Unlock()
|
||||
}
|
||||
|
||||
// setState advances the protocol state to newState. Returns false if moving
|
||||
// to that state from the current state is not allowed.
|
||||
func (l *ListenerConn) setState(newState int32) bool {
|
||||
var expectedState int32
|
||||
|
||||
switch newState {
|
||||
case connStateIdle:
|
||||
expectedState = connStateExpectReadyForQuery
|
||||
case connStateExpectResponse:
|
||||
expectedState = connStateIdle
|
||||
case connStateExpectReadyForQuery:
|
||||
expectedState = connStateExpectResponse
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
|
||||
}
|
||||
|
||||
return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
|
||||
}
|
||||
|
||||
// Main logic is here: receive messages from the postgres backend, forward
|
||||
// notifications and query replies and keep the internal state in sync with the
|
||||
// protocol state. Returns when the connection has been lost, is about to go
|
||||
// away or should be discarded because we couldn't agree on the state with the
|
||||
// server backend.
|
||||
func (l *ListenerConn) listenerConnLoop() (err error) {
|
||||
defer errRecoverNoErrBadConn(&err)
|
||||
|
||||
r := &readBuf{}
|
||||
for {
|
||||
t, err := l.cn.recvMessage(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch t {
|
||||
case 'A':
|
||||
// recvNotification copies all the data so we don't need to worry
|
||||
// about the scratch buffer being overwritten.
|
||||
l.notificationChan <- recvNotification(r)
|
||||
|
||||
case 'T', 'D':
|
||||
// only used by tests; ignore
|
||||
|
||||
case 'E':
|
||||
// We might receive an ErrorResponse even when not in a query; it
|
||||
// is expected that the server will close the connection after
|
||||
// that, but we should make sure that the error we display is the
|
||||
// one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
|
||||
if !l.setState(connStateExpectReadyForQuery) {
|
||||
return parseError(r)
|
||||
}
|
||||
l.replyChan <- message{t, parseError(r)}
|
||||
|
||||
case 'C', 'I':
|
||||
if !l.setState(connStateExpectReadyForQuery) {
|
||||
// protocol out of sync
|
||||
return fmt.Errorf("unexpected CommandComplete")
|
||||
}
|
||||
// ExecSimpleQuery doesn't need to know about this message
|
||||
|
||||
case 'Z':
|
||||
if !l.setState(connStateIdle) {
|
||||
// protocol out of sync
|
||||
return fmt.Errorf("unexpected ReadyForQuery")
|
||||
}
|
||||
l.replyChan <- message{t, nil}
|
||||
|
||||
case 'N', 'S':
|
||||
// ignore
|
||||
default:
|
||||
return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This is the main routine for the goroutine receiving on the database
|
||||
// connection. Most of the main logic is in listenerConnLoop.
|
||||
func (l *ListenerConn) listenerConnMain() {
|
||||
err := l.listenerConnLoop()
|
||||
|
||||
// listenerConnLoop terminated; we're done, but we still have to clean up.
|
||||
// Make sure nobody tries to start any new queries by making sure the err
|
||||
// pointer is set. It is important that we do not overwrite its value; a
|
||||
// connection could be closed by either this goroutine or one sending on
|
||||
// the connection -- whoever closes the connection is assumed to have the
|
||||
// more meaningful error message (as the other one will probably get
|
||||
// net.errClosed), so that goroutine sets the error we expose while the
|
||||
// other error is discarded. If the connection is lost while two
|
||||
// goroutines are operating on the socket, it probably doesn't matter which
|
||||
// error we expose so we don't try to do anything more complex.
|
||||
l.connectionLock.Lock()
|
||||
if l.err == nil {
|
||||
l.err = err
|
||||
}
|
||||
l.cn.Close()
|
||||
l.connectionLock.Unlock()
|
||||
|
||||
// There might be a query in-flight; make sure nobody's waiting for a
|
||||
// response to it, since there's not going to be one.
|
||||
close(l.replyChan)
|
||||
|
||||
// let the listener know we're done
|
||||
close(l.notificationChan)
|
||||
|
||||
// this ListenerConn is done
|
||||
}
|
||||
|
||||
// Send a LISTEN query to the server. See ExecSimpleQuery.
|
||||
func (l *ListenerConn) Listen(channel string) (bool, error) {
|
||||
return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
|
||||
}
|
||||
|
||||
// Send an UNLISTEN query to the server. See ExecSimpleQuery.
|
||||
func (l *ListenerConn) Unlisten(channel string) (bool, error) {
|
||||
return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
|
||||
}
|
||||
|
||||
// Send `UNLISTEN *` to the server. See ExecSimpleQuery.
|
||||
func (l *ListenerConn) UnlistenAll() (bool, error) {
|
||||
return l.ExecSimpleQuery("UNLISTEN *")
|
||||
}
|
||||
|
||||
// Ping the remote server to make sure it's alive. Non-nil error means the
|
||||
// connection has failed and should be abandoned.
|
||||
func (l *ListenerConn) Ping() error {
|
||||
sent, err := l.ExecSimpleQuery("")
|
||||
if !sent {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
// shouldn't happen
|
||||
panic(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Attempt to send a query on the connection. Returns an error if sending the
|
||||
// query failed, and the caller should initiate closure of this connection.
|
||||
// The caller must be holding senderLock (see acquireSenderLock and
|
||||
// releaseSenderLock).
|
||||
func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
|
||||
defer errRecoverNoErrBadConn(&err)
|
||||
|
||||
// must set connection state before sending the query
|
||||
if !l.setState(connStateExpectResponse) {
|
||||
panic("two queries running at the same time")
|
||||
}
|
||||
|
||||
// Can't use l.cn.writeBuf here because it uses the scratch buffer which
|
||||
// might get overwritten by listenerConnLoop.
|
||||
b := &writeBuf{
|
||||
buf: []byte("Q\x00\x00\x00\x00"),
|
||||
pos: 1,
|
||||
}
|
||||
b.string(q)
|
||||
l.cn.send(b)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Execute a "simple query" (i.e. one with no bindable parameters) on the
|
||||
// connection. The possible return values are:
|
||||
// 1) "executed" is true; the query was executed to completion on the
|
||||
// database server. If the query failed, err will be set to the error
|
||||
// returned by the database, otherwise err will be nil.
|
||||
// 2) If "executed" is false, the query could not be executed on the remote
|
||||
// server. err will be non-nil.
|
||||
//
|
||||
// After a call to ExecSimpleQuery has returned an executed=false value, the
|
||||
// connection has either been closed or will be closed shortly thereafter, and
|
||||
// all subsequently executed queries will return an error.
|
||||
func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
|
||||
if err = l.acquireSenderLock(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer l.releaseSenderLock()
|
||||
|
||||
err = l.sendSimpleQuery(q)
|
||||
if err != nil {
|
||||
// We can't know what state the protocol is in, so we need to abandon
|
||||
// this connection.
|
||||
l.connectionLock.Lock()
|
||||
// Set the error pointer if it hasn't been set already; see
|
||||
// listenerConnMain.
|
||||
if l.err == nil {
|
||||
l.err = err
|
||||
}
|
||||
l.connectionLock.Unlock()
|
||||
l.cn.c.Close()
|
||||
return false, err
|
||||
}
|
||||
|
||||
// now we just wait for a reply..
|
||||
for {
|
||||
m, ok := <-l.replyChan
|
||||
if !ok {
|
||||
// We lost the connection to the server, don't bother waiting for
|
||||
// a response. err should have been set already.
|
||||
l.connectionLock.Lock()
|
||||
err := l.err
|
||||
l.connectionLock.Unlock()
|
||||
return false, err
|
||||
}
|
||||
switch m.typ {
|
||||
case 'Z':
|
||||
// sanity check
|
||||
if m.err != nil {
|
||||
panic("m.err != nil")
|
||||
}
|
||||
// done; err might or might not be set
|
||||
return true, err
|
||||
|
||||
case 'E':
|
||||
// sanity check
|
||||
if m.err == nil {
|
||||
panic("m.err == nil")
|
||||
}
|
||||
// server responded with an error; ReadyForQuery to follow
|
||||
err = m.err
|
||||
|
||||
default:
|
||||
return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ListenerConn) Close() error {
|
||||
l.connectionLock.Lock()
|
||||
if l.err != nil {
|
||||
l.connectionLock.Unlock()
|
||||
return errListenerConnClosed
|
||||
}
|
||||
l.err = errListenerConnClosed
|
||||
l.connectionLock.Unlock()
|
||||
// We can't send anything on the connection without holding senderLock.
|
||||
// Simply close the net.Conn to wake up everyone operating on it.
|
||||
return l.cn.c.Close()
|
||||
}
|
||||
|
||||
// Err() returns the reason the connection was closed. It is not safe to call
|
||||
// this function until l.Notify has been closed.
|
||||
func (l *ListenerConn) Err() error {
|
||||
return l.err
|
||||
}
|
||||
|
||||
var errListenerClosed = errors.New("pq: Listener has been closed")
|
||||
|
||||
var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
|
||||
var ErrChannelNotOpen = errors.New("pq: channel is not open")
|
||||
|
||||
type ListenerEventType int
|
||||
|
||||
const (
|
||||
// Emitted only when the database connection has been initially
|
||||
// initialized. err will always be nil.
|
||||
ListenerEventConnected ListenerEventType = iota
|
||||
|
||||
// Emitted after a database connection has been lost, either because of an
|
||||
// error or because Close has been called. err will be set to the reason
|
||||
// the database connection was lost.
|
||||
ListenerEventDisconnected
|
||||
|
||||
// Emitted after a database connection has been re-established after
|
||||
// connection loss. err will always be nil. After this event has been
|
||||
// emitted, a nil pq.Notification is sent on the Listener.Notify channel.
|
||||
ListenerEventReconnected
|
||||
|
||||
// Emitted after a connection to the database was attempted, but failed.
|
||||
// err will be set to an error describing why the connection attempt did
|
||||
// not succeed.
|
||||
ListenerEventConnectionAttemptFailed
|
||||
)
|
||||
|
||||
type EventCallbackType func(event ListenerEventType, err error)
|
||||
|
||||
// Listener provides an interface for listening to notifications from a
|
||||
// PostgreSQL database. For general usage information, see section
|
||||
// "Notifications".
|
||||
//
|
||||
// Listener can safely be used from concurrently running goroutines.
|
||||
type Listener struct {
|
||||
// Channel for receiving notifications from the database. In some cases a
|
||||
// nil value will be sent. See section "Notifications" above.
|
||||
Notify chan *Notification
|
||||
|
||||
name string
|
||||
minReconnectInterval time.Duration
|
||||
maxReconnectInterval time.Duration
|
||||
dialer Dialer
|
||||
eventCallback EventCallbackType
|
||||
|
||||
lock sync.Mutex
|
||||
isClosed bool
|
||||
reconnectCond *sync.Cond
|
||||
cn *ListenerConn
|
||||
connNotificationChan <-chan *Notification
|
||||
channels map[string]struct{}
|
||||
}
|
||||
|
||||
// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
|
||||
//
|
||||
// name should be set to a connection string to be used to establish the
|
||||
// database connection (see section "Connection String Parameters" above).
|
||||
//
|
||||
// minReconnectInterval controls the duration to wait before trying to
|
||||
// re-establish the database connection after connection loss. After each
|
||||
// consecutive failure this interval is doubled, until maxReconnectInterval is
|
||||
// reached. Successfully completing the connection establishment procedure
|
||||
// resets the interval back to minReconnectInterval.
|
||||
//
|
||||
// The last parameter eventCallback can be set to a function which will be
|
||||
// called by the Listener when the state of the underlying database connection
|
||||
// changes. This callback will be called by the goroutine which dispatches the
|
||||
// notifications over the Notify channel, so you should try to avoid doing
|
||||
// potentially time-consuming operations from the callback.
|
||||
func NewListener(name string,
|
||||
minReconnectInterval time.Duration,
|
||||
maxReconnectInterval time.Duration,
|
||||
eventCallback EventCallbackType) *Listener {
|
||||
return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
|
||||
}
|
||||
|
||||
// NewDialListener is like NewListener but it takes a Dialer.
|
||||
func NewDialListener(d Dialer,
|
||||
name string,
|
||||
minReconnectInterval time.Duration,
|
||||
maxReconnectInterval time.Duration,
|
||||
eventCallback EventCallbackType) *Listener {
|
||||
|
||||
l := &Listener{
|
||||
name: name,
|
||||
minReconnectInterval: minReconnectInterval,
|
||||
maxReconnectInterval: maxReconnectInterval,
|
||||
dialer: d,
|
||||
eventCallback: eventCallback,
|
||||
|
||||
channels: make(map[string]struct{}),
|
||||
|
||||
Notify: make(chan *Notification, 32),
|
||||
}
|
||||
l.reconnectCond = sync.NewCond(&l.lock)
|
||||
|
||||
go l.listenerMain()
|
||||
|
||||
return l
|
||||
}
|
||||
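A compact sketch of the high-level API being defined here, in the spirit of the listen_example package earlier in this diff. conninfo and the channel name are placeholders; the nil notification delivered after a reconnect (see ListenerEventReconnected above) is handled explicitly:

```
package pqexample

import (
	"log"
	"time"

	"github.com/lib/pq"
)

// waitForEvents listens on channel and logs notifications until the Listener
// is closed elsewhere. conninfo is a placeholder connection string.
func waitForEvents(conninfo, channel string) error {
	reportProblem := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			log.Println("listener event error:", err)
		}
	}

	// Retry a lost connection after 10s, backing off to at most one minute.
	l := pq.NewListener(conninfo, 10*time.Second, time.Minute, reportProblem)
	if err := l.Listen(channel); err != nil {
		return err
	}
	defer l.Close()

	for n := range l.Notify {
		if n == nil {
			// Sent after the connection was re-established; notifications may
			// have been missed in the meantime, so re-check application state.
			log.Println("reconnected; re-checking state")
			continue
		}
		log.Printf("notification on %q: %s", n.Channel, n.Extra)
	}
	return nil
}
```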
|
||||
// Returns the notification channel for this listener. This is the same
|
||||
// channel as Notify, and will not be recreated during the life time of the
|
||||
// Listener.
|
||||
func (l *Listener) NotificationChannel() <-chan *Notification {
|
||||
return l.Notify
|
||||
}
|
||||
|
||||
// Listen starts listening for notifications on a channel. Calls to this
// function will block until an acknowledgement has been received from the
// server. Note that Listener automatically re-establishes the connection
// after connection loss, so this function may block indefinitely if the
// connection can not be re-established.
//
// Listen will only fail in three conditions:
//   1) The channel is already open. The returned error will be
//      ErrChannelAlreadyOpen.
//   2) The query was executed on the remote server, but PostgreSQL returned an
//      error message in response to the query. The returned error will be a
//      pq.Error containing the information the server supplied.
//   3) Close is called on the Listener before the request could be completed.
//
// The channel name is case-sensitive.
func (l *Listener) Listen(channel string) error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	// The server allows you to issue a LISTEN on a channel which is already
	// open, but it seems useful to be able to detect this case to spot for
	// mistakes in application logic. If the application genuinely doesn't
	// care, it can check the exported error and ignore it.
	_, exists := l.channels[channel]
	if exists {
		return ErrChannelAlreadyOpen
	}

	if l.cn != nil {
		// If gotResponse is true but error is set, the query was executed on
		// the remote server, but resulted in an error. This should be
		// relatively rare, so it's fine if we just pass the error to our
		// caller. However, if gotResponse is false, we could not complete the
		// query on the remote server and our underlying connection is about
		// to go away, so we only add relname to l.channels, and wait for
		// resync() to take care of the rest.
		gotResponse, err := l.cn.Listen(channel)
		if gotResponse && err != nil {
			return err
		}
	}

	l.channels[channel] = struct{}{}
	for l.cn == nil {
		l.reconnectCond.Wait()
		// we let go of the mutex for a while
		if l.isClosed {
			return errListenerClosed
		}
	}

	return nil
}

// Unlisten removes a channel from the Listener's channel list. Returns
// ErrChannelNotOpen if the Listener is not listening on the specified channel.
// Returns immediately with no error if there is no connection. Note that you
// might still get notifications for this channel even after Unlisten has
// returned.
//
// The channel name is case-sensitive.
func (l *Listener) Unlisten(channel string) error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	// Similarly to LISTEN, this is not an error in Postgres, but it seems
	// useful to distinguish from the normal conditions.
	_, exists := l.channels[channel]
	if !exists {
		return ErrChannelNotOpen
	}

	if l.cn != nil {
		// Similarly to Listen (see comment in that function), the caller
		// should only be bothered with an error if it came from the backend as
		// a response to our query.
		gotResponse, err := l.cn.Unlisten(channel)
		if gotResponse && err != nil {
			return err
		}
	}

	// Don't bother waiting for resync if there's no connection.
	delete(l.channels, channel)
	return nil
}

// UnlistenAll removes all channels from the Listener's channel list. Returns
// immediately with no error if there is no connection. Note that you might
// still get notifications for any of the deleted channels even after
// UnlistenAll has returned.
func (l *Listener) UnlistenAll() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	if l.cn != nil {
		// Similarly to Listen (see comment in that function), the caller
		// should only be bothered with an error if it came from the backend as
		// a response to our query.
		gotResponse, err := l.cn.UnlistenAll()
		if gotResponse && err != nil {
			return err
		}
	}

	// Don't bother waiting for resync if there's no connection.
	l.channels = make(map[string]struct{})
	return nil
}

// Ping the remote server to make sure it's alive. Non-nil return value means
// that there is no active connection.
func (l *Listener) Ping() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}
	if l.cn == nil {
		return errors.New("no connection")
	}

	return l.cn.Ping()
}

// Clean up after losing the server connection. Returns l.cn.Err(), which
// should have the reason the connection was lost.
func (l *Listener) disconnectCleanup() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	// sanity check; can't look at Err() until the channel has been closed
	select {
	case _, ok := <-l.connNotificationChan:
		if ok {
			panic("connNotificationChan not closed")
		}
	default:
		panic("connNotificationChan not closed")
	}

	err := l.cn.Err()
	l.cn.Close()
	l.cn = nil
	return err
}

// Synchronize the list of channels we want to be listening on with the server
// after the connection has been established.
func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
	doneChan := make(chan error)
	go func() {
		for channel := range l.channels {
			// If we got a response, return that error to our caller as it's
			// going to be more descriptive than cn.Err().
			gotResponse, err := cn.Listen(channel)
			if gotResponse && err != nil {
				doneChan <- err
				return
			}

			// If we couldn't reach the server, wait for notificationChan to
			// close and then return the error message from the connection, as
			// per ListenerConn's interface.
			if err != nil {
				for _ = range notificationChan {
				}
				doneChan <- cn.Err()
				return
			}
		}
		doneChan <- nil
	}()

	// Ignore notifications while synchronization is going on to avoid
	// deadlocks. We have to send a nil notification over Notify anyway as
	// we can't possibly know which notifications (if any) were lost while
	// the connection was down, so there's no reason to try and process
	// these messages at all.
	for {
		select {
		case _, ok := <-notificationChan:
			if !ok {
				notificationChan = nil
			}

		case err := <-doneChan:
			return err
		}
	}
}

// caller should NOT be holding l.lock
func (l *Listener) closed() bool {
	l.lock.Lock()
	defer l.lock.Unlock()

	return l.isClosed
}

func (l *Listener) connect() error {
	notificationChan := make(chan *Notification, 32)
	cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
	if err != nil {
		return err
	}

	l.lock.Lock()
	defer l.lock.Unlock()

	err = l.resync(cn, notificationChan)
	if err != nil {
		cn.Close()
		return err
	}

	l.cn = cn
	l.connNotificationChan = notificationChan
	l.reconnectCond.Broadcast()

	return nil
}

// Close disconnects the Listener from the database and shuts it down.
// Subsequent calls to its methods will return an error. Close returns an
// error if the connection has already been closed.
func (l *Listener) Close() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return errListenerClosed
	}

	if l.cn != nil {
		l.cn.Close()
	}
	l.isClosed = true

	return nil
}

func (l *Listener) emitEvent(event ListenerEventType, err error) {
	if l.eventCallback != nil {
		l.eventCallback(event, err)
	}
}

// Main logic here: maintain a connection to the server when possible, wait
// for notifications and emit events.
func (l *Listener) listenerConnLoop() {
	var nextReconnect time.Time

	reconnectInterval := l.minReconnectInterval
	for {
		for {
			err := l.connect()
			if err == nil {
				break
			}

			if l.closed() {
				return
			}
			l.emitEvent(ListenerEventConnectionAttemptFailed, err)

			time.Sleep(reconnectInterval)
			reconnectInterval *= 2
			if reconnectInterval > l.maxReconnectInterval {
				reconnectInterval = l.maxReconnectInterval
			}
		}

		if nextReconnect.IsZero() {
			l.emitEvent(ListenerEventConnected, nil)
		} else {
			l.emitEvent(ListenerEventReconnected, nil)
			l.Notify <- nil
		}

		reconnectInterval = l.minReconnectInterval
		nextReconnect = time.Now().Add(reconnectInterval)

		for {
			notification, ok := <-l.connNotificationChan
			if !ok {
				// lost connection, loop again
				break
			}
			l.Notify <- notification
		}

		err := l.disconnectCleanup()
		if l.closed() {
			return
		}
		l.emitEvent(ListenerEventDisconnected, err)

		time.Sleep(nextReconnect.Sub(time.Now()))
	}
}

func (l *Listener) listenerMain() {
	l.listenerConnLoop()
	close(l.Notify)
}

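This closes out the new `notify.go` file, which is the public LISTEN/NOTIFY entry point of the vendored lib/pq driver. A minimal consumer might look like the sketch below; the connection string, channel name, and reconnect intervals are illustrative assumptions, not values taken from this change.

```go
package main

import (
	"fmt"
	"time"

	"github.com/lib/pq"
)

func main() {
	// Assumed connection string; adjust for your environment.
	conninfo := "dbname=postgres user=postgres sslmode=disable"

	// The retry interval doubles on failure up to the maximum, per the NewListener docs above.
	listener := pq.NewListener(conninfo, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				fmt.Println("listener event error:", err)
			}
		})
	defer listener.Close()

	if err := listener.Listen("jobs"); err != nil {
		panic(err)
	}

	for {
		select {
		case n := <-listener.Notify:
			// A nil notification is delivered after a reconnect; notifications
			// may have been lost, so re-read any state you care about.
			if n == nil {
				fmt.Println("reconnected")
				continue
			}
			fmt.Printf("notification on %q: %q\n", n.Channel, n.Extra)
		case <-time.After(90 * time.Second):
			// Periodically ping the dedicated connection to surface failures early.
			go func() {
				if err := listener.Ping(); err != nil {
					fmt.Println("ping failed:", err)
				}
			}()
		}
	}
}
```

The nil-notification handling mirrors the `l.Notify <- nil` sent by `listenerConnLoop` after a successful reconnect.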
574 vendor/github.com/lib/pq/notify_test.go generated vendored Normal file
@@ -0,0 +1,574 @@
package pq

import (
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

var errNilNotification = errors.New("nil notification")

func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error {
	select {
	case n := <-ch:
		if n == nil {
			return errNilNotification
		}
		if n.Channel != relname || n.Extra != extra {
			return fmt.Errorf("unexpected notification %v", n)
		}
		return nil
	case <-time.After(1500 * time.Millisecond):
		return fmt.Errorf("timeout")
	}
}

func expectNoNotification(t *testing.T, ch <-chan *Notification) error {
	select {
	case n := <-ch:
		return fmt.Errorf("unexpected notification %v", n)
	case <-time.After(100 * time.Millisecond):
		return nil
	}
}

func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error {
	select {
	case e := <-eventch:
		if e != et {
			return fmt.Errorf("unexpected event %v", e)
		}
		return nil
	case <-time.After(1500 * time.Millisecond):
		panic("expectEvent timeout")
	}
}

func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error {
	select {
	case e := <-eventch:
		return fmt.Errorf("unexpected event %v", e)
	case <-time.After(100 * time.Millisecond):
		return nil
	}
}

func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) {
	datname := os.Getenv("PGDATABASE")
	sslmode := os.Getenv("PGSSLMODE")

	if datname == "" {
		os.Setenv("PGDATABASE", "pqgotest")
	}

	if sslmode == "" {
		os.Setenv("PGSSLMODE", "disable")
	}

	notificationChan := make(chan *Notification)
	l, err := NewListenerConn("", notificationChan)
	if err != nil {
		t.Fatal(err)
	}

	return l, notificationChan
}

func TestNewListenerConn(t *testing.T) {
	l, _ := newTestListenerConn(t)

	defer l.Close()
}

func TestConnListen(t *testing.T) {
	l, channel := newTestListenerConn(t)

	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	ok, err := l.Listen("notify_test")
	if !ok || err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, channel, "notify_test", "")
	if err != nil {
		t.Fatal(err)
	}
}

func TestConnUnlisten(t *testing.T) {
	l, channel := newTestListenerConn(t)

	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	ok, err := l.Listen("notify_test")
	if !ok || err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_test")

	err = expectNotification(t, channel, "notify_test", "")
	if err != nil {
		t.Fatal(err)
	}

	ok, err = l.Unlisten("notify_test")
	if !ok || err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNoNotification(t, channel)
	if err != nil {
		t.Fatal(err)
	}
}

func TestConnUnlistenAll(t *testing.T) {
	l, channel := newTestListenerConn(t)

	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	ok, err := l.Listen("notify_test")
	if !ok || err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_test")

	err = expectNotification(t, channel, "notify_test", "")
	if err != nil {
		t.Fatal(err)
	}

	ok, err = l.UnlistenAll()
	if !ok || err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNoNotification(t, channel)
	if err != nil {
		t.Fatal(err)
	}
}

func TestConnClose(t *testing.T) {
	l, _ := newTestListenerConn(t)
	defer l.Close()

	err := l.Close()
	if err != nil {
		t.Fatal(err)
	}
	err = l.Close()
	if err != errListenerConnClosed {
		t.Fatalf("expected errListenerConnClosed; got %v", err)
	}
}

func TestConnPing(t *testing.T) {
	l, _ := newTestListenerConn(t)
	defer l.Close()
	err := l.Ping()
	if err != nil {
		t.Fatal(err)
	}
	err = l.Close()
	if err != nil {
		t.Fatal(err)
	}
	err = l.Ping()
	if err != errListenerConnClosed {
		t.Fatalf("expected errListenerConnClosed; got %v", err)
	}
}

// Test for deadlock where a query fails while another one is queued
func TestConnExecDeadlock(t *testing.T) {
	l, _ := newTestListenerConn(t)
	defer l.Close()

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		l.ExecSimpleQuery("SELECT pg_sleep(60)")
		wg.Done()
	}()
	runtime.Gosched()
	go func() {
		l.ExecSimpleQuery("SELECT 1")
		wg.Done()
	}()
	// give the two goroutines some time to get into position
	runtime.Gosched()
	// calls Close on the net.Conn; equivalent to a network failure
	l.Close()

	var done int32 = 0
	go func() {
		time.Sleep(10 * time.Second)
		if atomic.LoadInt32(&done) != 1 {
			panic("timed out")
		}
	}()
	wg.Wait()
	atomic.StoreInt32(&done, 1)
}

// Test for ListenerConn being closed while a slow query is executing
func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) {
	l, _ := newTestListenerConn(t)
	defer l.Close()

	var wg sync.WaitGroup
	wg.Add(1)

	go func() {
		sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)")
		if sent {
			panic("expected sent=false")
		}
		// could be any of a number of errors
		if err == nil {
			panic("expected error")
		}
		wg.Done()
	}()
	// give the above goroutine some time to get into position
	runtime.Gosched()
	err := l.Close()
	if err != nil {
		t.Fatal(err)
	}
	var done int32 = 0
	go func() {
		time.Sleep(10 * time.Second)
		if atomic.LoadInt32(&done) != 1 {
			panic("timed out")
		}
	}()
	wg.Wait()
	atomic.StoreInt32(&done, 1)
}

func TestNotifyExtra(t *testing.T) {
	db := openTestConn(t)
	defer db.Close()

	if getServerVersion(t, db) < 90000 {
		t.Skip("skipping NOTIFY payload test since the server does not appear to support it")
	}

	l, channel := newTestListenerConn(t)
	defer l.Close()

	ok, err := l.Listen("notify_test")
	if !ok || err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_test, 'something'")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, channel, "notify_test", "something")
	if err != nil {
		t.Fatal(err)
	}
}

// create a new test listener and also set the timeouts
func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) {
	datname := os.Getenv("PGDATABASE")
	sslmode := os.Getenv("PGSSLMODE")

	if datname == "" {
		os.Setenv("PGDATABASE", "pqgotest")
	}

	if sslmode == "" {
		os.Setenv("PGSSLMODE", "disable")
	}

	eventch := make(chan ListenerEventType, 16)
	l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t })
	err := expectEvent(t, eventch, ListenerEventConnected)
	if err != nil {
		t.Fatal(err)
	}
	return l, eventch
}

func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) {
	return newTestListenerTimeout(t, time.Hour, time.Hour)
}

func TestListenerListen(t *testing.T) {
	l, _ := newTestListener(t)
	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	err := l.Listen("notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, l.Notify, "notify_listen_test", "")
	if err != nil {
		t.Fatal(err)
	}
}

func TestListenerUnlisten(t *testing.T) {
	l, _ := newTestListener(t)
	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	err := l.Listen("notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = l.Unlisten("notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, l.Notify, "notify_listen_test", "")
	if err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNoNotification(t, l.Notify)
	if err != nil {
		t.Fatal(err)
	}
}

func TestListenerUnlistenAll(t *testing.T) {
	l, _ := newTestListener(t)
	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	err := l.Listen("notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = l.UnlistenAll()
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, l.Notify, "notify_listen_test", "")
	if err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNoNotification(t, l.Notify)
	if err != nil {
		t.Fatal(err)
	}
}

func TestListenerFailedQuery(t *testing.T) {
	l, eventch := newTestListener(t)
	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	err := l.Listen("notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, l.Notify, "notify_listen_test", "")
	if err != nil {
		t.Fatal(err)
	}

	// shouldn't cause a disconnect
	ok, err := l.cn.ExecSimpleQuery("SELECT error")
	if !ok {
		t.Fatalf("could not send query to server: %v", err)
	}
	_, ok = err.(PGError)
	if !ok {
		t.Fatalf("unexpected error %v", err)
	}
	err = expectNoEvent(t, eventch)
	if err != nil {
		t.Fatal(err)
	}

	// should still work
	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, l.Notify, "notify_listen_test", "")
	if err != nil {
		t.Fatal(err)
	}
}

func TestListenerReconnect(t *testing.T) {
	l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
	defer l.Close()

	db := openTestConn(t)
	defer db.Close()

	err := l.Listen("notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	err = expectNotification(t, l.Notify, "notify_listen_test", "")
	if err != nil {
		t.Fatal(err)
	}

	// kill the connection and make sure it comes back up
	ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())")
	if ok {
		t.Fatalf("could not kill the connection: %v", err)
	}
	if err != io.EOF {
		t.Fatalf("unexpected error %v", err)
	}
	err = expectEvent(t, eventch, ListenerEventDisconnected)
	if err != nil {
		t.Fatal(err)
	}
	err = expectEvent(t, eventch, ListenerEventReconnected)
	if err != nil {
		t.Fatal(err)
	}

	// should still work
	_, err = db.Exec("NOTIFY notify_listen_test")
	if err != nil {
		t.Fatal(err)
	}

	// should get nil after Reconnected
	err = expectNotification(t, l.Notify, "", "")
	if err != errNilNotification {
		t.Fatal(err)
	}

	err = expectNotification(t, l.Notify, "notify_listen_test", "")
	if err != nil {
		t.Fatal(err)
	}
}

func TestListenerClose(t *testing.T) {
	l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
	defer l.Close()

	err := l.Close()
	if err != nil {
		t.Fatal(err)
	}
	err = l.Close()
	if err != errListenerClosed {
		t.Fatalf("expected errListenerClosed; got %v", err)
	}
}

func TestListenerPing(t *testing.T) {
	l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
	defer l.Close()

	err := l.Ping()
	if err != nil {
		t.Fatal(err)
	}

	err = l.Close()
	if err != nil {
		t.Fatal(err)
	}

	err = l.Ping()
	if err != errListenerClosed {
		t.Fatalf("expected errListenerClosed; got %v", err)
	}
}

6 vendor/github.com/lib/pq/oid/doc.go generated vendored Normal file
@@ -0,0 +1,6 @@
// Package oid contains OID constants
// as defined by the Postgres server.
package oid

// Oid is a Postgres Object ID.
type Oid uint32

74 vendor/github.com/lib/pq/oid/gen.go generated vendored Normal file
@@ -0,0 +1,74 @@
// +build ignore

// Generate the table of OID values
// Run with 'go run gen.go'.
package main

import (
	"database/sql"
	"fmt"
	"log"
	"os"
	"os/exec"

	_ "github.com/lib/pq"
)

func main() {
	datname := os.Getenv("PGDATABASE")
	sslmode := os.Getenv("PGSSLMODE")

	if datname == "" {
		os.Setenv("PGDATABASE", "pqgotest")
	}

	if sslmode == "" {
		os.Setenv("PGSSLMODE", "disable")
	}

	db, err := sql.Open("postgres", "")
	if err != nil {
		log.Fatal(err)
	}
	cmd := exec.Command("gofmt")
	cmd.Stderr = os.Stderr
	w, err := cmd.StdinPipe()
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Create("types.go")
	if err != nil {
		log.Fatal(err)
	}
	cmd.Stdout = f
	err = cmd.Start()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprintln(w, "// generated by 'go run gen.go'; do not edit")
	fmt.Fprintln(w, "\npackage oid")
	fmt.Fprintln(w, "const (")
	rows, err := db.Query(`
		SELECT typname, oid
		FROM pg_type WHERE oid < 10000
		ORDER BY oid;
	`)
	if err != nil {
		log.Fatal(err)
	}
	var name string
	var oid int
	for rows.Next() {
		err = rows.Scan(&name, &oid)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Fprintf(w, "T_%s Oid = %d\n", name, oid)
	}
	if err = rows.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Fprintln(w, ")")
	w.Close()
	cmd.Wait()
}

161 vendor/github.com/lib/pq/oid/types.go generated vendored Normal file
@@ -0,0 +1,161 @@
// generated by 'go run gen.go'; do not edit

package oid

const (
	T_bool             Oid = 16
	T_bytea            Oid = 17
	T_char             Oid = 18
	T_name             Oid = 19
	T_int8             Oid = 20
	T_int2             Oid = 21
	T_int2vector       Oid = 22
	T_int4             Oid = 23
	T_regproc          Oid = 24
	T_text             Oid = 25
	T_oid              Oid = 26
	T_tid              Oid = 27
	T_xid              Oid = 28
	T_cid              Oid = 29
	T_oidvector        Oid = 30
	T_pg_type          Oid = 71
	T_pg_attribute     Oid = 75
	T_pg_proc          Oid = 81
	T_pg_class         Oid = 83
	T_json             Oid = 114
	T_xml              Oid = 142
	T__xml             Oid = 143
	T_pg_node_tree     Oid = 194
	T__json            Oid = 199
	T_smgr             Oid = 210
	T_point            Oid = 600
	T_lseg             Oid = 601
	T_path             Oid = 602
	T_box              Oid = 603
	T_polygon          Oid = 604
	T_line             Oid = 628
	T__line            Oid = 629
	T_cidr             Oid = 650
	T__cidr            Oid = 651
	T_float4           Oid = 700
	T_float8           Oid = 701
	T_abstime          Oid = 702
	T_reltime          Oid = 703
	T_tinterval        Oid = 704
	T_unknown          Oid = 705
	T_circle           Oid = 718
	T__circle          Oid = 719
	T_money            Oid = 790
	T__money           Oid = 791
	T_macaddr          Oid = 829
	T_inet             Oid = 869
	T__bool            Oid = 1000
	T__bytea           Oid = 1001
	T__char            Oid = 1002
	T__name            Oid = 1003
	T__int2            Oid = 1005
	T__int2vector      Oid = 1006
	T__int4            Oid = 1007
	T__regproc         Oid = 1008
	T__text            Oid = 1009
	T__tid             Oid = 1010
	T__xid             Oid = 1011
	T__cid             Oid = 1012
	T__oidvector       Oid = 1013
	T__bpchar          Oid = 1014
	T__varchar         Oid = 1015
	T__int8            Oid = 1016
	T__point           Oid = 1017
	T__lseg            Oid = 1018
	T__path            Oid = 1019
	T__box             Oid = 1020
	T__float4          Oid = 1021
	T__float8          Oid = 1022
	T__abstime         Oid = 1023
	T__reltime         Oid = 1024
	T__tinterval       Oid = 1025
	T__polygon         Oid = 1027
	T__oid             Oid = 1028
	T_aclitem          Oid = 1033
	T__aclitem         Oid = 1034
	T__macaddr         Oid = 1040
	T__inet            Oid = 1041
	T_bpchar           Oid = 1042
	T_varchar          Oid = 1043
	T_date             Oid = 1082
	T_time             Oid = 1083
	T_timestamp        Oid = 1114
	T__timestamp       Oid = 1115
	T__date            Oid = 1182
	T__time            Oid = 1183
	T_timestamptz      Oid = 1184
	T__timestamptz     Oid = 1185
	T_interval         Oid = 1186
	T__interval        Oid = 1187
	T__numeric         Oid = 1231
	T_pg_database      Oid = 1248
	T__cstring         Oid = 1263
	T_timetz           Oid = 1266
	T__timetz          Oid = 1270
	T_bit              Oid = 1560
	T__bit             Oid = 1561
	T_varbit           Oid = 1562
	T__varbit          Oid = 1563
	T_numeric          Oid = 1700
	T_refcursor        Oid = 1790
	T__refcursor       Oid = 2201
	T_regprocedure     Oid = 2202
	T_regoper          Oid = 2203
	T_regoperator      Oid = 2204
	T_regclass         Oid = 2205
	T_regtype          Oid = 2206
	T__regprocedure    Oid = 2207
	T__regoper         Oid = 2208
	T__regoperator     Oid = 2209
	T__regclass        Oid = 2210
	T__regtype         Oid = 2211
	T_record           Oid = 2249
	T_cstring          Oid = 2275
	T_any              Oid = 2276
	T_anyarray         Oid = 2277
	T_void             Oid = 2278
	T_trigger          Oid = 2279
	T_language_handler Oid = 2280
	T_internal         Oid = 2281
	T_opaque           Oid = 2282
	T_anyelement       Oid = 2283
	T__record          Oid = 2287
	T_anynonarray      Oid = 2776
	T_pg_authid        Oid = 2842
	T_pg_auth_members  Oid = 2843
	T__txid_snapshot   Oid = 2949
	T_uuid             Oid = 2950
	T__uuid            Oid = 2951
	T_txid_snapshot    Oid = 2970
	T_fdw_handler      Oid = 3115
	T_anyenum          Oid = 3500
	T_tsvector         Oid = 3614
	T_tsquery          Oid = 3615
	T_gtsvector        Oid = 3642
	T__tsvector        Oid = 3643
	T__gtsvector       Oid = 3644
	T__tsquery         Oid = 3645
	T_regconfig        Oid = 3734
	T__regconfig       Oid = 3735
	T_regdictionary    Oid = 3769
	T__regdictionary   Oid = 3770
	T_anyrange         Oid = 3831
	T_event_trigger    Oid = 3838
	T_int4range        Oid = 3904
	T__int4range       Oid = 3905
	T_numrange         Oid = 3906
	T__numrange        Oid = 3907
	T_tsrange          Oid = 3908
	T__tsrange         Oid = 3909
	T_tstzrange        Oid = 3910
	T__tstzrange       Oid = 3911
	T_daterange        Oid = 3912
	T__daterange       Oid = 3913
	T_int8range        Oid = 3926
	T__int8range       Oid = 3927
)

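The generated table above mirrors the `pg_type` catalog. As a small, hypothetical illustration of how these constants might be consulted (the type names chosen here are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/lib/pq/oid"
)

func main() {
	// The generated constants map Postgres type names to their stable OIDs.
	types := map[string]oid.Oid{
		"bool":        oid.T_bool,
		"text":        oid.T_text,
		"timestamptz": oid.T_timestamptz,
	}
	for name, o := range types {
		fmt.Printf("%-11s -> OID %d\n", name, o)
	}
}
```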
269 vendor/github.com/lib/pq/ssl_test.go generated vendored Normal file
@@ -0,0 +1,269 @@
package pq

// This file contains SSL tests

import (
	_ "crypto/sha256"
	"crypto/x509"
	"database/sql"
	"fmt"
	"os"
	"path/filepath"
	"testing"
)

func maybeSkipSSLTests(t *testing.T) {
	// Require some special variables for testing certificates
	if os.Getenv("PQSSLCERTTEST_PATH") == "" {
		t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests")
	}

	value := os.Getenv("PQGOSSLTESTS")
	if value == "" || value == "0" {
		t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests")
	} else if value != "1" {
		t.Fatalf("unexpected value %q for PQGOSSLTESTS", value)
	}
}

func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) {
	db, err := openTestConnConninfo(conninfo)
	if err != nil {
		// should never fail
		t.Fatal(err)
	}
	// Do something with the connection to see whether it's working or not.
	tx, err := db.Begin()
	if err == nil {
		return db, tx.Rollback()
	}
	_ = db.Close()
	return nil, err
}

func checkSSLSetup(t *testing.T, conninfo string) {
	db, err := openSSLConn(t, conninfo)
	if err == nil {
		db.Close()
		t.Fatalf("expected error with conninfo=%q", conninfo)
	}
}

// Connect over SSL and run a simple query to test the basics
func TestSSLConnection(t *testing.T) {
	maybeSkipSSLTests(t)
	// Environment sanity check: should fail without SSL
	checkSSLSetup(t, "sslmode=disable user=pqgossltest")

	db, err := openSSLConn(t, "sslmode=require user=pqgossltest")
	if err != nil {
		t.Fatal(err)
	}
	rows, err := db.Query("SELECT 1")
	if err != nil {
		t.Fatal(err)
	}
	rows.Close()
}

// Test sslmode=verify-full
func TestSSLVerifyFull(t *testing.T) {
	maybeSkipSSLTests(t)
	// Environment sanity check: should fail without SSL
	checkSSLSetup(t, "sslmode=disable user=pqgossltest")

	// Not OK according to the system CA
	_, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest")
	if err == nil {
		t.Fatal("expected error")
	}
	_, ok := err.(x509.UnknownAuthorityError)
	if !ok {
		t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
	}

	rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
	rootCert := "sslrootcert=" + rootCertPath + " "
	// No match on Common Name
	_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest")
	if err == nil {
		t.Fatal("expected error")
	}
	_, ok = err.(x509.HostnameError)
	if !ok {
		t.Fatalf("expected x509.HostnameError, got %#+v", err)
	}
	// OK
	_, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest")
	if err != nil {
		t.Fatal(err)
	}
}

// Test sslmode=require sslrootcert=rootCertPath
func TestSSLRequireWithRootCert(t *testing.T) {
	maybeSkipSSLTests(t)
	// Environment sanity check: should fail without SSL
	checkSSLSetup(t, "sslmode=disable user=pqgossltest")

	bogusRootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "bogus_root.crt")
	bogusRootCert := "sslrootcert=" + bogusRootCertPath + " "

	// Not OK according to the bogus CA
	_, err := openSSLConn(t, bogusRootCert+"host=postgres sslmode=require user=pqgossltest")
	if err == nil {
		t.Fatal("expected error")
	}
	_, ok := err.(x509.UnknownAuthorityError)
	if !ok {
		t.Fatalf("expected x509.UnknownAuthorityError, got %s, %#+v", err, err)
	}

	nonExistentCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "non_existent.crt")
	nonExistentCert := "sslrootcert=" + nonExistentCertPath + " "

	// No match on Common Name, but that's OK because we're not validating anything.
	_, err = openSSLConn(t, nonExistentCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
	if err != nil {
		t.Fatal(err)
	}

	rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
	rootCert := "sslrootcert=" + rootCertPath + " "

	// No match on Common Name, but that's OK because we're not validating the CN.
	_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
	if err != nil {
		t.Fatal(err)
	}
	// Everything OK
	_, err = openSSLConn(t, rootCert+"host=postgres sslmode=require user=pqgossltest")
	if err != nil {
		t.Fatal(err)
	}
}

// Test sslmode=verify-ca
func TestSSLVerifyCA(t *testing.T) {
	maybeSkipSSLTests(t)
	// Environment sanity check: should fail without SSL
	checkSSLSetup(t, "sslmode=disable user=pqgossltest")

	// Not OK according to the system CA
	_, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
	if err == nil {
		t.Fatal("expected error")
	}
	_, ok := err.(x509.UnknownAuthorityError)
	if !ok {
		t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
	}

	rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
	rootCert := "sslrootcert=" + rootCertPath + " "
	// No match on Common Name, but that's OK
	_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest")
	if err != nil {
		t.Fatal(err)
	}
	// Everything OK
	_, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest")
	if err != nil {
		t.Fatal(err)
	}
}

func getCertConninfo(t *testing.T, source string) string {
	var sslkey string
	var sslcert string

	certpath := os.Getenv("PQSSLCERTTEST_PATH")

	switch source {
	case "missingkey":
		sslkey = "/tmp/filedoesnotexist"
		sslcert = filepath.Join(certpath, "postgresql.crt")
	case "missingcert":
		sslkey = filepath.Join(certpath, "postgresql.key")
		sslcert = "/tmp/filedoesnotexist"
	case "certtwice":
		sslkey = filepath.Join(certpath, "postgresql.crt")
		sslcert = filepath.Join(certpath, "postgresql.crt")
	case "valid":
		sslkey = filepath.Join(certpath, "postgresql.key")
		sslcert = filepath.Join(certpath, "postgresql.crt")
	default:
		t.Fatalf("invalid source %q", source)
	}
	return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert)
}

// Authenticate over SSL using client certificates
func TestSSLClientCertificates(t *testing.T) {
	maybeSkipSSLTests(t)
	// Environment sanity check: should fail without SSL
	checkSSLSetup(t, "sslmode=disable user=pqgossltest")

	// Should also fail without a valid certificate
	db, err := openSSLConn(t, "sslmode=require user=pqgosslcert")
	if err == nil {
		db.Close()
		t.Fatal("expected error")
	}
	pge, ok := err.(*Error)
	if !ok {
		t.Fatal("expected pq.Error")
	}
	if pge.Code.Name() != "invalid_authorization_specification" {
		t.Fatalf("unexpected error code %q", pge.Code.Name())
	}

	// Should work
	db, err = openSSLConn(t, getCertConninfo(t, "valid"))
	if err != nil {
		t.Fatal(err)
	}
	rows, err := db.Query("SELECT 1")
	if err != nil {
		t.Fatal(err)
	}
	rows.Close()
}

// Test errors with ssl certificates
func TestSSLClientCertificatesMissingFiles(t *testing.T) {
	maybeSkipSSLTests(t)
	// Environment sanity check: should fail without SSL
	checkSSLSetup(t, "sslmode=disable user=pqgossltest")

	// Key missing, should fail
	_, err := openSSLConn(t, getCertConninfo(t, "missingkey"))
	if err == nil {
		t.Fatal("expected error")
	}
	// should be a PathError
	_, ok := err.(*os.PathError)
	if !ok {
		t.Fatalf("expected PathError, got %#+v", err)
	}

	// Cert missing, should fail
	_, err = openSSLConn(t, getCertConninfo(t, "missingcert"))
	if err == nil {
		t.Fatal("expected error")
	}
	// should be a PathError
	_, ok = err.(*os.PathError)
	if !ok {
		t.Fatalf("expected PathError, got %#+v", err)
	}

	// Key has wrong permissions, should fail
	_, err = openSSLConn(t, getCertConninfo(t, "certtwice"))
	if err == nil {
		t.Fatal("expected error")
	}
	if err != ErrSSLKeyHasWorldPermissions {
		t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err)
	}
}

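These tests exercise the driver's TLS-related connection parameters (`sslmode`, `sslrootcert`, `sslcert`, `sslkey`). Outside the test harness, a verified connection might be opened as in the sketch below; the host, user, and certificate paths are placeholder assumptions, not values from this diff.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical certificate paths; the tests above read theirs from PQSSLCERTTEST_PATH.
	conninfo := "host=db.example.com user=app dbname=app " +
		"sslmode=verify-full " +
		"sslrootcert=/etc/ssl/certs/root.crt " +
		"sslcert=/etc/ssl/certs/client.crt " +
		"sslkey=/etc/ssl/private/client.key"

	db, err := sql.Open("postgres", conninfo)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// sql.Open does not dial; a trivial query (as in openSSLConn above) proves the TLS setup.
	var one int
	if err := db.QueryRow("SELECT 1").Scan(&one); err != nil {
		panic(err)
	}
	fmt.Println("TLS connection verified:", one)
}
```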
76 vendor/github.com/lib/pq/url.go generated vendored Normal file
@@ -0,0 +1,76 @@
package pq

import (
	"fmt"
	"net"
	nurl "net/url"
	"sort"
	"strings"
)

// ParseURL no longer needs to be used by clients of this library since supplying a URL as a
// connection string to sql.Open() is now supported:
//
//	sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
//
// It remains exported here for backwards-compatibility.
//
// ParseURL converts a url to a connection string for driver.Open.
// Example:
//
//	"postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
//
// converts to:
//
//	"user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
//
// A minimal example:
//
//	"postgres://"
//
// This will be blank, causing driver.Open to use all of the defaults
func ParseURL(url string) (string, error) {
	u, err := nurl.Parse(url)
	if err != nil {
		return "", err
	}

	if u.Scheme != "postgres" && u.Scheme != "postgresql" {
		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
	}

	var kvs []string
	escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
	accrue := func(k, v string) {
		if v != "" {
			kvs = append(kvs, k+"="+escaper.Replace(v))
		}
	}

	if u.User != nil {
		v := u.User.Username()
		accrue("user", v)

		v, _ = u.User.Password()
		accrue("password", v)
	}

	if host, port, err := net.SplitHostPort(u.Host); err != nil {
		accrue("host", u.Host)
	} else {
		accrue("host", host)
		accrue("port", port)
	}

	if u.Path != "" {
		accrue("dbname", u.Path[1:])
	}

	q := u.Query()
	for k := range q {
		accrue(k, q.Get(k))
	}

	sort.Strings(kvs) // Makes testing easier (not a performance concern)
	return strings.Join(kvs, " "), nil
}

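A quick sketch of `ParseURL` in use, with the example URL taken from the doc comment above:

```go
package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	// URL form taken from the ParseURL documentation above.
	url := "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"

	conninfo, err := pq.ParseURL(url)
	if err != nil {
		panic(err)
	}
	// Prints the key=value form, with keys sorted alphabetically:
	// dbname=mydb host=1.2.3.4 password=secret port=5432 sslmode=verify-full user=bob
	fmt.Println(conninfo)
}
```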
Some files were not shown because too many files have changed in this diff.