vendor: make revendor

parent 172df9ccef
commit eaeab218b8

162 changed files with 13796 additions and 844 deletions
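The Makefile's `revendor` target itself is not part of this diff; as a rough sketch (the target name and exact commands are assumptions, not taken from this repository), revendoring a Go-modules project usually amounts to:

```bash
# Hypothetical revendor recipe; the repository's actual Makefile rule may differ.
go mod tidy      # prune and add go.mod / go.sum entries
go mod vendor    # re-populate the vendor/ tree from go.mod
go mod verify    # check downloaded modules against go.sum
```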
2 go.mod

@@ -46,12 +46,14 @@ require (
 	github.com/ugorji/go/codec v0.0.0-20181127175209-856da096dbdf // indirect
 	github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect
 	golang.org/x/crypto v0.0.0-20160711182412-2c99acdd1e9b
+	golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3
 	golang.org/x/net v0.0.0-20170413175226-5602c733f70a
 	golang.org/x/oauth2 v0.0.0-20160718223228-08c8d727d239
 	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect
 	golang.org/x/sys v0.0.0-20151211033651-833a04a10549 // indirect
 	golang.org/x/text v0.0.0-20170401064109-f4b4367115ec // indirect
 	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c // indirect
+	golang.org/x/tools v0.0.0-20181201035826-d0ca3933b724 // indirect
 	google.golang.org/appengine v0.0.0-20160621060416-267c27e74922 // indirect
 	google.golang.org/genproto v0.0.0-20170404132009-411e09b969b1 // indirect
 	google.golang.org/grpc v0.0.0-20170413033559-0e8b58d22f34
4 go.sum

@@ -91,6 +91,8 @@ github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4M
 github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 golang.org/x/crypto v0.0.0-20160711182412-2c99acdd1e9b h1:2jrSTd+N8P7Wg79ClE25uXmVIE7WluBSpTSsQQ2Nypo=
 golang.org/x/crypto v0.0.0-20160711182412-2c99acdd1e9b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/net v0.0.0-20170413175226-5602c733f70a h1:U+RBxJXt1cn83eNU5KfO0ABG27IrDKWhrbr9MBCri/s=
 golang.org/x/net v0.0.0-20170413175226-5602c733f70a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/oauth2 v0.0.0-20160718223228-08c8d727d239 h1:zW4VTIvN4l/liomF2DkpwzM8vz+Xlp9lO06+Z32c91U=

@@ -103,6 +105,8 @@ golang.org/x/text v0.0.0-20170401064109-f4b4367115ec h1:IQbbXMrYo9hsfbt8unKNTFiv
 golang.org/x/text v0.0.0-20170401064109-f4b4367115ec/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20181201035826-d0ca3933b724 h1:eV9myT/I6o1p8salzgZ0f1pz54PEgUf2NkCxEf6t+xs=
+golang.org/x/tools v0.0.0-20181201035826-d0ca3933b724/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 google.golang.org/appengine v0.0.0-20160621060416-267c27e74922 h1:gHCUPwst+IxLPSNSG+jClGv+Dz5Tly/Y8D73bvQwIXM=
 google.golang.org/appengine v0.0.0-20160621060416-267c27e74922/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/genproto v0.0.0-20170404132009-411e09b969b1 h1:HEurpBgyZQ15ngKW89Tw1gNMezbyH69oBSyrKIBvOLw=
16 vendor/github.com/beevik/etree/.travis.yml (generated, vendored, normal file)

@@ -0,0 +1,16 @@
language: go
sudo: false

go:
    - 1.4.2
    - 1.5.1
    - 1.6
    - tip

matrix:
    allow_failures:
        - go: tip

script:
    - go vet ./...
    - go test -v ./...
7 vendor/github.com/beevik/etree/CONTRIBUTORS (generated, vendored, normal file)

@@ -0,0 +1,7 @@
Brett Vickers (beevik)
Felix Geisendörfer (felixge)
Kamil Kisiel (kisielk)
Graham King (grahamking)
Matt Smith (ma314smith)
Michal Jemala (michaljemala)
Nicolas Piganeau (npiganeau)
203 vendor/github.com/beevik/etree/README.md (generated, vendored, normal file)

@@ -0,0 +1,203 @@
[![Build Status](https://travis-ci.org/beevik/etree.svg?branch=master)](https://travis-ci.org/beevik/etree)
[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree)

etree
=====

The etree package is a lightweight, pure go package that expresses XML in
the form of an element tree. Its design was inspired by the Python
[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html)
module. Some of the package's features include:

* Represents XML documents as trees of elements for easy traversal.
* Imports, serializes, modifies or creates XML documents from scratch.
* Writes and reads XML to/from files, byte slices, strings and io interfaces.
* Performs simple or complex searches with lightweight XPath-like query APIs.
* Auto-indents XML using spaces or tabs for better readability.
* Implemented in pure go; depends only on standard go libraries.
* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml)
  package.

### Creating an XML document

The following example creates an XML document from scratch using the etree
package and outputs its indented contents to stdout.
```go
doc := etree.NewDocument()
doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)

people := doc.CreateElement("People")
people.CreateComment("These are all known people")

jon := people.CreateElement("Person")
jon.CreateAttr("name", "Jon")

sally := people.CreateElement("Person")
sally.CreateAttr("name", "Sally")

doc.Indent(2)
doc.WriteTo(os.Stdout)
```

Output:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="style.xsl"?>
<People>
  <!--These are all known people-->
  <Person name="Jon"/>
  <Person name="Sally"/>
</People>
```

### Reading an XML file

Suppose you have a file on disk called `bookstore.xml` containing the
following data:

```xml
<bookstore xmlns:p="urn:schemas-books-com:prices">

  <book category="COOKING">
    <title lang="en">Everyday Italian</title>
    <author>Giada De Laurentiis</author>
    <year>2005</year>
    <p:price>30.00</p:price>
  </book>

  <book category="CHILDREN">
    <title lang="en">Harry Potter</title>
    <author>J K. Rowling</author>
    <year>2005</year>
    <p:price>29.99</p:price>
  </book>

  <book category="WEB">
    <title lang="en">XQuery Kick Start</title>
    <author>James McGovern</author>
    <author>Per Bothner</author>
    <author>Kurt Cagle</author>
    <author>James Linn</author>
    <author>Vaidyanathan Nagarajan</author>
    <year>2003</year>
    <p:price>49.99</p:price>
  </book>

  <book category="WEB">
    <title lang="en">Learning XML</title>
    <author>Erik T. Ray</author>
    <year>2003</year>
    <p:price>39.95</p:price>
  </book>

</bookstore>
```

This code reads the file's contents into an etree document.
```go
doc := etree.NewDocument()
if err := doc.ReadFromFile("bookstore.xml"); err != nil {
	panic(err)
}
```

You can also read XML from a string, a byte slice, or an `io.Reader`.

### Processing elements and attributes

This example illustrates several ways to access elements and attributes using
etree selection queries.
```go
root := doc.SelectElement("bookstore")
fmt.Println("ROOT element:", root.Tag)

for _, book := range root.SelectElements("book") {
	fmt.Println("CHILD element:", book.Tag)
	if title := book.SelectElement("title"); title != nil {
		lang := title.SelectAttrValue("lang", "unknown")
		fmt.Printf("  TITLE: %s (%s)\n", title.Text(), lang)
	}
	for _, attr := range book.Attr {
		fmt.Printf("  ATTR: %s=%s\n", attr.Key, attr.Value)
	}
}
```
Output:
```
ROOT element: bookstore
CHILD element: book
  TITLE: Everyday Italian (en)
  ATTR: category=COOKING
CHILD element: book
  TITLE: Harry Potter (en)
  ATTR: category=CHILDREN
CHILD element: book
  TITLE: XQuery Kick Start (en)
  ATTR: category=WEB
CHILD element: book
  TITLE: Learning XML (en)
  ATTR: category=WEB
```

### Path queries

This example uses etree's path functions to select all book titles that fall
into the category of 'WEB'. The double-slash prefix in the path causes the
search for book elements to occur recursively; book elements may appear at any
level of the XML hierarchy.
```go
for _, t := range doc.FindElements("//book[@category='WEB']/title") {
	fmt.Println("Title:", t.Text())
}
```

Output:
```
Title: XQuery Kick Start
Title: Learning XML
```

This example finds the first book element under the root bookstore element and
outputs the tag and text of each of its child elements.
```go
for _, e := range doc.FindElements("./bookstore/book[1]/*") {
	fmt.Printf("%s: %s\n", e.Tag, e.Text())
}
```

Output:
```
title: Everyday Italian
author: Giada De Laurentiis
year: 2005
price: 30.00
```

This example finds all books with a price of 49.99 and outputs their titles.
```go
path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
for _, e := range doc.FindElementsPath(path) {
	fmt.Println(e.Text())
}
```

Output:
```
XQuery Kick Start
```

Note that this example uses the FindElementsPath function, which takes as an
argument a pre-compiled path object. Use precompiled paths when you plan to
search with the same path more than once.

### Other features

These are just a few examples of the things the etree package can do. See the
[documentation](http://godoc.org/github.com/beevik/etree) for a complete
description of its capabilities.

### Contributing

This project accepts contributions. Just fork the repo and submit a pull
request!
2388 vendor/github.com/beorn7/perks/quantile/exampledata.txt (generated, vendored, normal file)

File diff suppressed because it is too large.
37 vendor/github.com/coreos/etcd/auth/authpb/auth.proto (generated, vendored, normal file)

@@ -0,0 +1,37 @@
syntax = "proto3";
package authpb;

import "gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_enum_prefix_all) = false;

// User is a single entry in the bucket authUsers
message User {
  bytes name = 1;
  bytes password = 2;
  repeated string roles = 3;
}

// Permission is a single entity
message Permission {
  enum Type {
    READ = 0;
    WRITE = 1;
    READWRITE = 2;
  }
  Type permType = 1;

  bytes key = 2;
  bytes range_end = 3;
}

// Role is a single entry in the bucket authRoles
message Role {
  bytes name = 1;

  repeated Permission keyPermission = 2;
}
85 vendor/github.com/coreos/etcd/clientv3/README.md (generated, vendored, normal file)

@@ -0,0 +1,85 @@
# etcd/clientv3

[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)

`etcd/clientv3` is the official Go etcd client for v3.

## Install

```bash
go get github.com/coreos/etcd/clientv3
```

## Get started

Create client using `clientv3.New`:

```go
cli, err := clientv3.New(clientv3.Config{
	Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
	DialTimeout: 5 * time.Second,
})
if err != nil {
	// handle error!
}
defer cli.Close()
```

etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses
[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it.
If the client is not closed, the connection will have leaky goroutines. To specify client request timeout,
pass `context.WithTimeout` to APIs:

```go
ctx, cancel := context.WithTimeout(context.Background(), timeout)
resp, err := cli.Put(ctx, "sample_key", "sample_value")
cancel()
if err != nil {
	// handle error!
}
// use the response
```

etcd uses `cmd/vendor` directory to store external dependencies, which are
to be compiled into etcd release binaries. `client` can be imported without
vendoring. For full compatibility, it is recommended to vendor builds using
etcd's vendored packages, using tools like godep, as in
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).

## Error Handling

etcd client returns 2 types of errors:

1. context error: canceled or deadline exceeded.
2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).

Here is the example code to handle client errors:

```go
resp, err := cli.Put(ctx, "", "")
if err != nil {
	switch err {
	case context.Canceled:
		log.Fatalf("ctx is canceled by another routine: %v", err)
	case context.DeadlineExceeded:
		log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
	case rpctypes.ErrEmptyKey:
		log.Fatalf("client-side error: %v", err)
	default:
		log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
	}
}
```

## Metrics

The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).

## Namespacing

The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.

## Examples

More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
34 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto (generated, vendored, normal file)

@@ -0,0 +1,34 @@
syntax = "proto2";
package etcdserverpb;

import "gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

message Request {
  optional uint64 ID = 1 [(gogoproto.nullable) = false];
  optional string Method = 2 [(gogoproto.nullable) = false];
  optional string Path = 3 [(gogoproto.nullable) = false];
  optional string Val = 4 [(gogoproto.nullable) = false];
  optional bool Dir = 5 [(gogoproto.nullable) = false];
  optional string PrevValue = 6 [(gogoproto.nullable) = false];
  optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false];
  optional bool PrevExist = 8 [(gogoproto.nullable) = true];
  optional int64 Expiration = 9 [(gogoproto.nullable) = false];
  optional bool Wait = 10 [(gogoproto.nullable) = false];
  optional uint64 Since = 11 [(gogoproto.nullable) = false];
  optional bool Recursive = 12 [(gogoproto.nullable) = false];
  optional bool Sorted = 13 [(gogoproto.nullable) = false];
  optional bool Quorum = 14 [(gogoproto.nullable) = false];
  optional int64 Time = 15 [(gogoproto.nullable) = false];
  optional bool Stream = 16 [(gogoproto.nullable) = false];
  optional bool Refresh = 17 [(gogoproto.nullable) = true];
}

message Metadata {
  optional uint64 NodeID = 1 [(gogoproto.nullable) = false];
  optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
}
74 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto (generated, vendored, normal file)

@@ -0,0 +1,74 @@
syntax = "proto3";
package etcdserverpb;

import "gogoproto/gogo.proto";
import "etcdserver.proto";
import "rpc.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

message RequestHeader {
  uint64 ID = 1;
  // username is a username that is associated with an auth token of gRPC connection
  string username = 2;
  // auth_revision is a revision number of auth.authStore. It is not related to mvcc
  uint64 auth_revision = 3;
}

// An InternalRaftRequest is the union of all requests which can be
// sent via raft.
message InternalRaftRequest {
  RequestHeader header = 100;
  uint64 ID = 1;

  Request v2 = 2;

  RangeRequest range = 3;
  PutRequest put = 4;
  DeleteRangeRequest delete_range = 5;
  TxnRequest txn = 6;
  CompactionRequest compaction = 7;

  LeaseGrantRequest lease_grant = 8;
  LeaseRevokeRequest lease_revoke = 9;

  AlarmRequest alarm = 10;

  AuthEnableRequest auth_enable = 1000;
  AuthDisableRequest auth_disable = 1011;

  InternalAuthenticateRequest authenticate = 1012;

  AuthUserAddRequest auth_user_add = 1100;
  AuthUserDeleteRequest auth_user_delete = 1101;
  AuthUserGetRequest auth_user_get = 1102;
  AuthUserChangePasswordRequest auth_user_change_password = 1103;
  AuthUserGrantRoleRequest auth_user_grant_role = 1104;
  AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
  AuthUserListRequest auth_user_list = 1106;
  AuthRoleListRequest auth_role_list = 1107;

  AuthRoleAddRequest auth_role_add = 1200;
  AuthRoleDeleteRequest auth_role_delete = 1201;
  AuthRoleGetRequest auth_role_get = 1202;
  AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
  AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
}

message EmptyResponse {
}

// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
// For avoiding misusage the field, we have an internal version of AuthenticateRequest.
message InternalAuthenticateRequest {
  string name = 1;
  string password = 2;

  // simple_token is generated in API layer (etcdserver/v3_server.go)
  string simple_token = 3;
}
984 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto (generated, vendored, normal file)

@@ -0,0 +1,984 @@
syntax = "proto3";
package etcdserverpb;

import "gogoproto/gogo.proto";
import "etcd/mvcc/mvccpb/kv.proto";
import "etcd/auth/authpb/auth.proto";

// for grpc-gateway
import "google/api/annotations.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;

service KV {
  // Range gets the keys in the range from the key-value store.
  rpc Range(RangeRequest) returns (RangeResponse) {
    option (google.api.http) = {
      post: "/v3alpha/kv/range"
      body: "*"
    };
  }

  // Put puts the given key into the key-value store.
  // A put request increments the revision of the key-value store
  // and generates one event in the event history.
  rpc Put(PutRequest) returns (PutResponse) {
    option (google.api.http) = {
      post: "/v3alpha/kv/put"
      body: "*"
    };
  }

  // DeleteRange deletes the given range from the key-value store.
  // A delete request increments the revision of the key-value store
  // and generates a delete event in the event history for every deleted key.
  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
    option (google.api.http) = {
      post: "/v3alpha/kv/deleterange"
      body: "*"
    };
  }

  // Txn processes multiple requests in a single transaction.
  // A txn request increments the revision of the key-value store
  // and generates events with the same revision for every completed request.
  // It is not allowed to modify the same key several times within one txn.
  rpc Txn(TxnRequest) returns (TxnResponse) {
    option (google.api.http) = {
      post: "/v3alpha/kv/txn"
      body: "*"
    };
  }

  // Compact compacts the event history in the etcd key-value store. The key-value
  // store should be periodically compacted or the event history will continue to grow
  // indefinitely.
  rpc Compact(CompactionRequest) returns (CompactionResponse) {
    option (google.api.http) = {
      post: "/v3alpha/kv/compaction"
      body: "*"
    };
  }
}

service Watch {
  // Watch watches for events happening or that have happened. Both input and output
  // are streams; the input stream is for creating and canceling watchers and the output
  // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
  // for several watches at once. The entire event history can be watched starting from the
  // last compaction revision.
  rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
    option (google.api.http) = {
      post: "/v3alpha/watch"
      body: "*"
    };
  }
}

service Lease {
  // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
  // within a given time to live period. All keys attached to the lease will be expired and
  // deleted if the lease expires. Each expired key generates a delete event in the event history.
  rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
    option (google.api.http) = {
      post: "/v3alpha/lease/grant"
      body: "*"
    };
  }

  // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
  rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
    option (google.api.http) = {
      post: "/v3alpha/kv/lease/revoke"
      body: "*"
    };
  }

  // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
  // to the server and streaming keep alive responses from the server to the client.
  rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
    option (google.api.http) = {
      post: "/v3alpha/lease/keepalive"
      body: "*"
    };
  }

  // LeaseTimeToLive retrieves lease information.
  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
    option (google.api.http) = {
      post: "/v3alpha/kv/lease/timetolive"
      body: "*"
    };
  }

  // TODO(xiangli) List all existing Leases?
}

service Cluster {
  // MemberAdd adds a member into the cluster.
  rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
    option (google.api.http) = {
      post: "/v3alpha/cluster/member/add"
      body: "*"
    };
  }

  // MemberRemove removes an existing member from the cluster.
  rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
    option (google.api.http) = {
      post: "/v3alpha/cluster/member/remove"
      body: "*"
    };
  }

  // MemberUpdate updates the member configuration.
  rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
    option (google.api.http) = {
      post: "/v3alpha/cluster/member/update"
      body: "*"
    };
  }

  // MemberList lists all the members in the cluster.
  rpc MemberList(MemberListRequest) returns (MemberListResponse) {
    option (google.api.http) = {
      post: "/v3alpha/cluster/member/list"
      body: "*"
    };
  }
}

service Maintenance {
  // Alarm activates, deactivates, and queries alarms regarding cluster health.
  rpc Alarm(AlarmRequest) returns (AlarmResponse) {
    option (google.api.http) = {
      post: "/v3alpha/maintenance/alarm"
      body: "*"
    };
  }

  // Status gets the status of the member.
  rpc Status(StatusRequest) returns (StatusResponse) {
    option (google.api.http) = {
      post: "/v3alpha/maintenance/status"
      body: "*"
    };
  }

  // Defragment defragments a member's backend database to recover storage space.
  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
    option (google.api.http) = {
      post: "/v3alpha/maintenance/defragment"
      body: "*"
    };
  }

  // Hash returns the hash of the local KV state for consistency checking purpose.
  // This is designed for testing; do not use this in production when there
  // are ongoing transactions.
  rpc Hash(HashRequest) returns (HashResponse) {
    option (google.api.http) = {
      post: "/v3alpha/maintenance/hash"
      body: "*"
    };
  }

  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
    option (google.api.http) = {
      post: "/v3alpha/maintenance/snapshot"
      body: "*"
    };
  }
}

service Auth {
  // AuthEnable enables authentication.
  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/enable"
      body: "*"
    };
  }

  // AuthDisable disables authentication.
  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/disable"
      body: "*"
    };
  }

  // Authenticate processes an authenticate request.
  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/authenticate"
      body: "*"
    };
  }

  // UserAdd adds a new user.
  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/user/add"
      body: "*"
    };
  }

  // UserGet gets detailed user information.
  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/user/get"
      body: "*"
    };
  }

  // UserList gets a list of all users.
  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/user/list"
      body: "*"
    };
  }

  // UserDelete deletes a specified user.
  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/user/delete"
      body: "*"
    };
  }

  // UserChangePassword changes the password of a specified user.
  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/user/changepw"
      body: "*"
    };
  }

  // UserGrant grants a role to a specified user.
  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/user/grant"
      body: "*"
    };
  }

  // UserRevokeRole revokes a role of specified user.
  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/user/revoke"
      body: "*"
    };
  }

  // RoleAdd adds a new role.
  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/role/add"
      body: "*"
    };
  }

  // RoleGet gets detailed role information.
  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/role/get"
      body: "*"
    };
  }

  // RoleList gets lists of all roles.
  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/role/list"
      body: "*"
    };
  }

  // RoleDelete deletes a specified role.
  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/role/delete"
      body: "*"
    };
  }

  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/role/grant"
      body: "*"
    };
  }

  // RoleRevokePermission revokes a key or range permission of a specified role.
  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
    option (google.api.http) = {
      post: "/v3alpha/auth/role/revoke"
      body: "*"
    };
  }
}

message ResponseHeader {
  // cluster_id is the ID of the cluster which sent the response.
  uint64 cluster_id = 1;
  // member_id is the ID of the member which sent the response.
  uint64 member_id = 2;
  // revision is the key-value store revision when the request was applied.
  int64 revision = 3;
  // raft_term is the raft term when the request was applied.
  uint64 raft_term = 4;
}

message RangeRequest {
  enum SortOrder {
    NONE = 0; // default, no sorting
    ASCEND = 1; // lowest target value first
    DESCEND = 2; // highest target value first
  }
  enum SortTarget {
    KEY = 0;
    VERSION = 1;
    CREATE = 2;
    MOD = 3;
    VALUE = 4;
  }

  // key is the first key for the range. If range_end is not given, the request only looks up key.
  bytes key = 1;
  // range_end is the upper bound on the requested range [key, range_end).
  // If range_end is '\0', the range is all keys >= key.
  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
  // then the range request gets all keys prefixed with key.
  // If both key and range_end are '\0', then the range request returns all keys.
  bytes range_end = 2;
  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
  // it is treated as no limit.
  int64 limit = 3;
  // revision is the point-in-time of the key-value store to use for the range.
  // If revision is less or equal to zero, the range is over the newest key-value store.
  // If the revision has been compacted, ErrCompacted is returned as a response.
  int64 revision = 4;

  // sort_order is the order for returned sorted results.
  SortOrder sort_order = 5;

  // sort_target is the key-value field to use for sorting.
  SortTarget sort_target = 6;

  // serializable sets the range request to use serializable member-local reads.
  // Range requests are linearizable by default; linearizable requests have higher
  // latency and lower throughput than serializable requests but reflect the current
  // consensus of the cluster. For better performance, in exchange for possible stale reads,
  // a serializable range request is served locally without needing to reach consensus
  // with other nodes in the cluster.
  bool serializable = 7;

  // keys_only when set returns only the keys and not the values.
  bool keys_only = 8;

  // count_only when set returns only the count of the keys in the range.
  bool count_only = 9;

  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
  // lesser mod revisions will be filtered away.
  int64 min_mod_revision = 10;

  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
  // greater mod revisions will be filtered away.
  int64 max_mod_revision = 11;

  // min_create_revision is the lower bound for returned key create revisions; all keys with
  // lesser create revisions will be filtered away.
  int64 min_create_revision = 12;

  // max_create_revision is the upper bound for returned key create revisions; all keys with
  // greater create revisions will be filtered away.
  int64 max_create_revision = 13;
}

message RangeResponse {
  ResponseHeader header = 1;
  // kvs is the list of key-value pairs matched by the range request.
  // kvs is empty when count is requested.
  repeated mvccpb.KeyValue kvs = 2;
  // more indicates if there are more keys to return in the requested range.
  bool more = 3;
  // count is set to the number of keys within the range when requested.
  int64 count = 4;
}

message PutRequest {
  // key is the key, in bytes, to put into the key-value store.
  bytes key = 1;
  // value is the value, in bytes, to associate with the key in the key-value store.
  bytes value = 2;
  // lease is the lease ID to associate with the key in the key-value store. A lease
  // value of 0 indicates no lease.
  int64 lease = 3;

  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
  // The previous key-value pair will be returned in the put response.
  bool prev_kv = 4;

  // If ignore_value is set, etcd updates the key using its current value.
  // Returns an error if the key does not exist.
  bool ignore_value = 5;

  // If ignore_lease is set, etcd updates the key using its current lease.
  // Returns an error if the key does not exist.
  bool ignore_lease = 6;
}

message PutResponse {
  ResponseHeader header = 1;
  // if prev_kv is set in the request, the previous key-value pair will be returned.
  mvccpb.KeyValue prev_kv = 2;
}

message DeleteRangeRequest {
  // key is the first key to delete in the range.
  bytes key = 1;
  // range_end is the key following the last key to delete for the range [key, range_end).
  // If range_end is not given, the range is defined to contain only the key argument.
  // If range_end is one bit larger than the given key, then the range is all the keys
  // with the prefix (the given key).
  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
  bytes range_end = 2;

  // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
  // The previous key-value pairs will be returned in the delete response.
  bool prev_kv = 3;
}

message DeleteRangeResponse {
  ResponseHeader header = 1;
  // deleted is the number of keys deleted by the delete range request.
  int64 deleted = 2;
  // if prev_kv is set in the request, the previous key-value pairs will be returned.
  repeated mvccpb.KeyValue prev_kvs = 3;
}

message RequestOp {
  // request is a union of request types accepted by a transaction.
  oneof request {
    RangeRequest request_range = 1;
    PutRequest request_put = 2;
    DeleteRangeRequest request_delete_range = 3;
  }
}

message ResponseOp {
  // response is a union of response types returned by a transaction.
  oneof response {
    RangeResponse response_range = 1;
    PutResponse response_put = 2;
    DeleteRangeResponse response_delete_range = 3;
  }
}

message Compare {
  enum CompareResult {
    EQUAL = 0;
    GREATER = 1;
    LESS = 2;
    NOT_EQUAL = 3;
  }
  enum CompareTarget {
    VERSION = 0;
    CREATE = 1;
    MOD = 2;
    VALUE = 3;
  }
  // result is logical comparison operation for this comparison.
  CompareResult result = 1;
  // target is the key-value field to inspect for the comparison.
  CompareTarget target = 2;
  // key is the subject key for the comparison operation.
  bytes key = 3;
  oneof target_union {
    // version is the version of the given key
    int64 version = 4;
    // create_revision is the creation revision of the given key
    int64 create_revision = 5;
    // mod_revision is the last modified revision of the given key.
    int64 mod_revision = 6;
    // value is the value of the given key, in bytes.
    bytes value = 7;
  }
}

// From google paxosdb paper:
// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
// and consists of three components:
// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
// may apply to the same or different entries in the database. All tests in the guard are applied and
// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
// it executes f op (see item 3 below).
// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
// lookup operation, and applies to a single database entry. Two different operations in the list may apply
// to the same or different entries in the database. These operations are executed
// if guard evaluates to
// true.
// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
message TxnRequest {
  // compare is a list of predicates representing a conjunction of terms.
  // If the comparisons succeed, then the success requests will be processed in order,
  // and the response will contain their respective responses in order.
  // If the comparisons fail, then the failure requests will be processed in order,
  // and the response will contain their respective responses in order.
  repeated Compare compare = 1;
  // success is a list of requests which will be applied when compare evaluates to true.
  repeated RequestOp success = 2;
  // failure is a list of requests which will be applied when compare evaluates to false.
  repeated RequestOp failure = 3;
}

message TxnResponse {
  ResponseHeader header = 1;
  // succeeded is set to true if the compare evaluated to true or false otherwise.
  bool succeeded = 2;
  // responses is a list of responses corresponding to the results from applying
  // success if succeeded is true or failure if succeeded is false.
  repeated ResponseOp responses = 3;
}

// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
// with a revision less than the compaction revision will be removed.
message CompactionRequest {
  // revision is the key-value store revision for the compaction operation.
  int64 revision = 1;
  // physical is set so the RPC will wait until the compaction is physically
  // applied to the local database such that compacted entries are totally
  // removed from the backend database.
  bool physical = 2;
}

message CompactionResponse {
  ResponseHeader header = 1;
}

message HashRequest {
}

message HashResponse {
  ResponseHeader header = 1;
  // hash is the hash value computed from the responding member's key-value store.
  uint32 hash = 2;
}

message SnapshotRequest {
}

message SnapshotResponse {
  // header has the current key-value store information. The first header in the snapshot
  // stream indicates the point in time of the snapshot.
  ResponseHeader header = 1;

  // remaining_bytes is the number of blob bytes to be sent after this message
  uint64 remaining_bytes = 2;

  // blob contains the next chunk of the snapshot in the snapshot stream.
  bytes blob = 3;
}

message WatchRequest {
  // request_union is a request to either create a new watcher or cancel an existing watcher.
  oneof request_union {
    WatchCreateRequest create_request = 1;
    WatchCancelRequest cancel_request = 2;
  }
}

message WatchCreateRequest {
  // key is the key to register for watching.
  bytes key = 1;
  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
  // or equal to the key argument are watched.
  // If the range_end is one bit larger than the given key,
  // then all keys with the prefix (the given key) will be watched.
  bytes range_end = 2;
  // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
  int64 start_revision = 3;
  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
  // no events to the new watcher if there are no recent events. It is useful when clients
  // wish to recover a disconnected watcher starting from a recent known revision.
  // The etcd server may decide how often it will send notifications based on current load.
  bool progress_notify = 4;

  enum FilterType {
    // filter out put event.
    NOPUT = 0;
    // filter out delete event.
    NODELETE = 1;
  }
  // filters filter the events at server side before it sends back to the watcher.
  repeated FilterType filters = 5;

  // If prev_kv is set, created watcher gets the previous KV before the event happens.
  // If the previous KV is already compacted, nothing will be returned.
  bool prev_kv = 6;
}

message WatchCancelRequest {
  // watch_id is the watcher id to cancel so that no more events are transmitted.
  int64 watch_id = 1;
}

message WatchResponse {
  ResponseHeader header = 1;
  // watch_id is the ID of the watcher that corresponds to the response.
  int64 watch_id = 2;
  // created is set to true if the response is for a create watch request.
  // The client should record the watch_id and expect to receive events for
  // the created watcher from the same stream.
  // All events sent to the created watcher will attach with the same watch_id.
  bool created = 3;
  // canceled is set to true if the response is for a cancel watch request.
  // No further events will be sent to the canceled watcher.
  bool canceled = 4;
  // compact_revision is set to the minimum index if a watcher tries to watch
  // at a compacted index.
  //
  // This happens when creating a watcher at a compacted revision or the watcher cannot
  // catch up with the progress of the key-value store.
  //
  // The client should treat the watcher as canceled and should not try to create any
  // watcher with the same start_revision again.
  int64 compact_revision = 5;

  // cancel_reason indicates the reason for canceling the watcher.
  string cancel_reason = 6;

  repeated mvccpb.Event events = 11;
}

message LeaseGrantRequest {
  // TTL is the advisory time-to-live in seconds.
  int64 TTL = 1;
  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
  int64 ID = 2;
}

message LeaseGrantResponse {
  ResponseHeader header = 1;
  // ID is the lease ID for the granted lease.
  int64 ID = 2;
  // TTL is the server chosen lease time-to-live in seconds.
  int64 TTL = 3;
  string error = 4;
}

message LeaseRevokeRequest {
  // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
  int64 ID = 1;
}

message LeaseRevokeResponse {
  ResponseHeader header = 1;
}

message LeaseKeepAliveRequest {
  // ID is the lease ID for the lease to keep alive.
  int64 ID = 1;
}

message LeaseKeepAliveResponse {
  ResponseHeader header = 1;
  // ID is the lease ID from the keep alive request.
  int64 ID = 2;
  // TTL is the new time-to-live for the lease.
  int64 TTL = 3;
}

message LeaseTimeToLiveRequest {
  // ID is the lease ID for the lease.
  int64 ID = 1;
  // keys is true to query all the keys attached to this lease.
  bool keys = 2;
}

message LeaseTimeToLiveResponse {
  ResponseHeader header = 1;
  // ID is the lease ID from the keep alive request.
  int64 ID = 2;
  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
  int64 TTL = 3;
  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
  int64 grantedTTL = 4;
  // Keys is the list of keys attached to this lease.
  repeated bytes keys = 5;
}

message Member {
  // ID is the member ID for this member.
  uint64 ID = 1;
  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
  string name = 2;
  // peerURLs is the list of URLs the member exposes to the cluster for communication.
  repeated string peerURLs = 3;
  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
  repeated string clientURLs = 4;
}

message MemberAddRequest {
  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
  repeated string peerURLs = 1;
}

message MemberAddResponse {
  ResponseHeader header = 1;
  // member is the member information for the added member.
  Member member = 2;
  // members is a list of all members after adding the new member.
  repeated Member members = 3;
}

message MemberRemoveRequest {
  // ID is the member ID of the member to remove.
  uint64 ID = 1;
}

message MemberRemoveResponse {
  ResponseHeader header = 1;
  // members is a list of all members after removing the member.
  repeated Member members = 2;
}

message MemberUpdateRequest {
  // ID is the member ID of the member to update.
  uint64 ID = 1;
  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
  repeated string peerURLs = 2;
}

message MemberUpdateResponse {
  ResponseHeader header = 1;
  // members is a list of all members after updating the member.
  repeated Member members = 2;
}

message MemberListRequest {
}

message MemberListResponse {
  ResponseHeader header = 1;
  // members is a list of all members associated with the cluster.
  repeated Member members = 2;
}

message DefragmentRequest {
}

message DefragmentResponse {
  ResponseHeader header = 1;
}

enum AlarmType {
  NONE = 0; // default, used to query if any alarm is active
  NOSPACE = 1; // space quota is exhausted
}

message AlarmRequest {
  enum AlarmAction {
    GET = 0;
    ACTIVATE = 1;
    DEACTIVATE = 2;
  }
  // action is the kind of alarm request to issue. The action
  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
  // raised alarm.
  AlarmAction action = 1;
  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
  // alarm request covers all members.
  uint64 memberID = 2;
  // alarm is the type of alarm to consider for this request.
  AlarmType alarm = 3;
}

message AlarmMember {
  // memberID is the ID of the member associated with the raised alarm.
  uint64 memberID = 1;
  // alarm is the type of alarm which has been raised.
  AlarmType alarm = 2;
}

message AlarmResponse {
  ResponseHeader header = 1;
  // alarms is a list of alarms associated with the alarm request.
  repeated AlarmMember alarms = 2;
}

message StatusRequest {
}

message StatusResponse {
  ResponseHeader header = 1;
  // version is the cluster protocol version used by the responding member.
  string version = 2;
  // dbSize is the size of the backend database, in bytes, of the responding member.
  int64 dbSize = 3;
  // leader is the member ID which the responding member believes is the current leader.
  uint64 leader = 4;
  // raftIndex is the current raft index of the responding member.
  uint64 raftIndex = 5;
  // raftTerm is the current raft term of the responding member.
  uint64 raftTerm = 6;
}

message AuthEnableRequest {
}

message AuthDisableRequest {
}
|
|
||||||
|
message AuthenticateRequest {
|
||||||
|
string name = 1;
|
||||||
|
string password = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserAddRequest {
|
||||||
|
string name = 1;
|
||||||
|
string password = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserGetRequest {
|
||||||
|
string name = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserDeleteRequest {
|
||||||
|
// name is the name of the user to delete.
|
||||||
|
string name = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserChangePasswordRequest {
|
||||||
|
// name is the name of the user whose password is being changed.
|
||||||
|
string name = 1;
|
||||||
|
// password is the new password for the user.
|
||||||
|
string password = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserGrantRoleRequest {
|
||||||
|
// user is the name of the user which should be granted a given role.
|
||||||
|
string user = 1;
|
||||||
|
// role is the name of the role to grant to the user.
|
||||||
|
string role = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserRevokeRoleRequest {
|
||||||
|
string name = 1;
|
||||||
|
string role = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleAddRequest {
|
||||||
|
// name is the name of the role to add to the authentication system.
|
||||||
|
string name = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleGetRequest {
|
||||||
|
string role = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserListRequest {
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleListRequest {
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleDeleteRequest {
|
||||||
|
string role = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleGrantPermissionRequest {
|
||||||
|
// name is the name of the role which will be granted the permission.
|
||||||
|
string name = 1;
|
||||||
|
// perm is the permission to grant to the role.
|
||||||
|
authpb.Permission perm = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleRevokePermissionRequest {
|
||||||
|
string role = 1;
|
||||||
|
string key = 2;
|
||||||
|
string range_end = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthEnableResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthDisableResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthenticateResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
// token is an authorized token that can be used in succeeding RPCs
|
||||||
|
string token = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserAddResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserGetResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
|
||||||
|
repeated string roles = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserDeleteResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserChangePasswordResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserGrantRoleResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserRevokeRoleResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleAddResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleGetResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
|
||||||
|
repeated authpb.Permission perm = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleListResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
|
||||||
|
repeated string roles = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserListResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
|
||||||
|
repeated string users = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleDeleteResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleGrantPermissionResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthRoleRevokePermissionResponse {
|
||||||
|
ResponseHeader header = 1;
|
||||||
|
}
|
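The lease messages above correspond one-to-one to calls on the etcd v3 client. As a minimal sketch, not part of this change, granting a lease, attaching a key, and refreshing it through the clientv3 package might look like the following; the endpoint, key, and TTL are assumed placeholders.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Assumed endpoint; adjust for your cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// LeaseGrantRequest: TTL is advisory; the server returns the chosen TTL and lease ID.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}

	// Attach a key to the lease; it is deleted when the lease expires or is revoked.
	if _, err := cli.Put(ctx, "/demo/key", "value", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}

	// LeaseKeepAliveRequest: refresh the lease once and read the new TTL.
	ka, err := cli.KeepAliveOnce(ctx, grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("lease %x refreshed, ttl=%ds", ka.ID, ka.TTL)
}
```

KeepAliveOnce issues a single LeaseKeepAliveRequest; the streaming KeepAlive variant refreshes the lease in the background instead.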
29 vendor/github.com/coreos/etcd/main.go generated vendored
@@ -1,29 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package main is a simple wrapper of the real etcd entrypoint package
// (located at github.com/coreos/etcd/etcdmain) to ensure that etcd is still
// "go getable"; e.g. `go get github.com/coreos/etcd` works as expected and
// builds a binary in $GOBIN/etcd
//
// This package should NOT be extended or modified in any way; to modify the
// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package.
//
package main

import "github.com/coreos/etcd/etcdmain"

func main() {
	etcdmain.Main()
}
49 vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto generated vendored Normal file
@@ -0,0 +1,49 @@
syntax = "proto3";
package mvccpb;

import "gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_enum_prefix_all) = false;

message KeyValue {
  // key is the key in bytes. An empty key is not allowed.
  bytes key = 1;
  // create_revision is the revision of last creation on this key.
  int64 create_revision = 2;
  // mod_revision is the revision of last modification on this key.
  int64 mod_revision = 3;
  // version is the version of the key. A deletion resets
  // the version to zero and any modification of the key
  // increases its version.
  int64 version = 4;
  // value is the value held by the key, in bytes.
  bytes value = 5;
  // lease is the ID of the lease that attached to key.
  // When the attached lease expires, the key will be deleted.
  // If lease is 0, then no lease is attached to the key.
  int64 lease = 6;
}

message Event {
  enum EventType {
    PUT = 0;
    DELETE = 1;
  }
  // type is the kind of event. If type is a PUT, it indicates
  // new data has been stored to the key. If type is a DELETE,
  // it indicates the key was deleted.
  EventType type = 1;
  // kv holds the KeyValue for the event.
  // A PUT event contains current kv pair.
  // A PUT event with kv.Version=1 indicates the creation of a key.
  // A DELETE/EXPIRE event contains the deleted key with
  // its modification revision set to the revision of deletion.
  KeyValue kv = 2;

  // prev_kv holds the key-value pair before the event happens.
  KeyValue prev_kv = 3;
}
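The Event and KeyValue messages above are what a watch stream carries. A rough sketch of consuming them through the clientv3 package follows; the endpoint and key prefix are illustrative assumptions, not part of this change.

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/mvcc/mvccpb"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Watch every key under an assumed prefix, asking for the previous key-value pair.
	for resp := range cli.Watch(context.Background(), "/demo/", clientv3.WithPrefix(), clientv3.WithPrevKV()) {
		for _, ev := range resp.Events {
			switch ev.Type {
			case mvccpb.PUT:
				// Per kv.proto above, Version == 1 means this PUT created the key.
				if ev.Kv.Version == 1 {
					log.Printf("created %q -> %q", ev.Kv.Key, ev.Kv.Value)
				} else {
					log.Printf("updated %q -> %q", ev.Kv.Key, ev.Kv.Value)
				}
			case mvccpb.DELETE:
				// prev_kv carries the pair as it was before deletion (needs WithPrevKV).
				if ev.PrevKv != nil {
					log.Printf("deleted %q (was %q)", ev.Kv.Key, ev.PrevKv.Value)
				}
			}
		}
	}
}
```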
2 vendor/github.com/coreos/go-oidc/.gitignore generated vendored Normal file
@@ -0,0 +1,2 @@
/bin
/gopath

16 vendor/github.com/coreos/go-oidc/.travis.yml generated vendored Normal file
@@ -0,0 +1,16 @@
language: go

go:
- 1.7.5
- 1.8

install:
- go get -v -t github.com/coreos/go-oidc/...
- go get golang.org/x/tools/cmd/cover
- go get github.com/golang/lint/golint

script:
- ./test

notifications:
  email: false
71 vendor/github.com/coreos/go-oidc/CONTRIBUTING.md generated vendored Normal file
@@ -0,0 +1,71 @@
# How to Contribute

CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
GitHub pull requests. This document outlines some of the conventions on
development workflow, commit message formatting, contact points and other
resources to make it easier to get your contribution accepted.

# Certificate of Origin

By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution. See the [DCO](DCO) file for details.

# Email and Chat

The project currently uses the general CoreOS email list and IRC channel:
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org

Please avoid emailing maintainers found in the MAINTAINERS file directly. They
are very busy and read the mailing lists.

## Getting Started

- Fork the repository on GitHub
- Read the [README](README.md) for build and test instructions
- Play with the project, submit bugs, submit patches!

## Contribution Flow

This is a rough outline of what a contributor's workflow looks like:

- Create a topic branch from where you want to base your work (usually master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure the tests pass, and add any new tests as appropriate.
- Submit a pull request to the original repository.

Thanks for your contributions!

### Format of the Commit Message

We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.

```
scripts: add the test-cluster command

this uses tmux to setup a test cluster that you can easily kill and
start for debugging.

Fixes #38
```

The format can be described more formally as follows:

```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```

The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.
36 vendor/github.com/coreos/go-oidc/DCO generated vendored Normal file
@@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.

3 vendor/github.com/coreos/go-oidc/MAINTAINERS generated vendored Normal file
@@ -0,0 +1,3 @@
Bobby Rullo <bobby.rullo@coreos.com> (@bobbyrullo)
Ed Rooth <ed.rooth@coreos.com> (@sym3tri)
Eric Chiang <eric.chiang@coreos.com> (@ericchiang)
72 vendor/github.com/coreos/go-oidc/README.md generated vendored Normal file
@@ -0,0 +1,72 @@
# go-oidc

[![GoDoc](https://godoc.org/github.com/coreos/go-oidc?status.svg)](https://godoc.org/github.com/coreos/go-oidc)
[![Build Status](https://travis-ci.org/coreos/go-oidc.png?branch=master)](https://travis-ci.org/coreos/go-oidc)

## OpenID Connect support for Go

This package enables OpenID Connect support for the [golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) package.

```go
provider, err := oidc.NewProvider(ctx, "https://accounts.google.com")
if err != nil {
    // handle error
}

// Configure an OpenID Connect aware OAuth2 client.
oauth2Config := oauth2.Config{
    ClientID:     clientID,
    ClientSecret: clientSecret,
    RedirectURL:  redirectURL,

    // Discovery returns the OAuth2 endpoints.
    Endpoint: provider.Endpoint(),

    // "openid" is a required scope for OpenID Connect flows.
    Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
}
```

OAuth2 redirects are unchanged.

```go
func handleRedirect(w http.ResponseWriter, r *http.Request) {
    http.Redirect(w, r, oauth2Config.AuthCodeURL(state), http.StatusFound)
}
```

Then, on the callback response, the provider can be used to verify ID Tokens.

```go
var verifier = provider.Verifier()

func handleOAuth2Callback(w http.ResponseWriter, r *http.Request) {
    // Verify state and errors.

    oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
    if err != nil {
        // handle error
    }

    // Extract the ID Token from OAuth2 token.
    rawIDToken, ok := oauth2Token.Extra("id_token").(string)
    if !ok {
        // handle missing token
    }

    // Parse and verify ID Token payload.
    idToken, err := verifier.Verify(ctx, rawIDToken)
    if err != nil {
        // handle error
    }

    // Extract custom claims
    var claims struct {
        Email    string `json:"email"`
        Verified bool   `json:"email_verified"`
    }
    if err := idToken.Claims(&claims); err != nil {
        // handle error
    }
}
```
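The callback above only notes `// Verify state and errors.` without showing how. One stdlib-only way to do it, sketched below as an illustration rather than go-oidc API (the cookie name and lifetime are arbitrary assumptions), is to issue the state as a random value in a short-lived cookie and compare it on the callback.

```go
// Package oidcstate sketches the state handling that handleRedirect and
// handleOAuth2Callback above leave as a comment. Names here are hypothetical.
package oidcstate

import (
	"crypto/rand"
	"encoding/base64"
	"net/http"
	"time"
)

// NewState returns a random OAuth2 "state" value and stores it in a
// short-lived cookie so the callback handler can check it later.
func NewState(w http.ResponseWriter) (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	state := base64.RawURLEncoding.EncodeToString(b)
	http.SetCookie(w, &http.Cookie{
		Name:     "oauth_state",
		Value:    state,
		Expires:  time.Now().Add(10 * time.Minute),
		HttpOnly: true,
	})
	return state, nil
}

// VerifyState reports whether the state echoed back by the provider matches
// the value stored in the cookie set by NewState.
func VerifyState(r *http.Request) bool {
	c, err := r.Cookie("oauth_state")
	if err != nil {
		return false
	}
	return c.Value != "" && c.Value == r.URL.Query().Get("state")
}
```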
15 vendor/github.com/coreos/go-oidc/test generated vendored Normal file
@@ -0,0 +1,15 @@
#!/bin/bash

set -e

# Filter out any files with a !golint build tag.
LINTABLE=$( go list -tags=golint -f '
{{- range $i, $file := .GoFiles -}}
{{ $file }} {{ end }}
{{ range $i, $file := .TestGoFiles -}}
{{ $file }} {{ end }}' github.com/coreos/go-oidc )

go test -v -i -race github.com/coreos/go-oidc/...
go test -v -race github.com/coreos/go-oidc/...
golint $LINTABLE
go vet github.com/coreos/go-oidc/...
0 vendor/github.com/felixge/httpsnoop/.gitignore generated vendored Normal file

6 vendor/github.com/felixge/httpsnoop/.travis.yml generated vendored Normal file
@@ -0,0 +1,6 @@
language: go

go:
- 1.6
- 1.7
- 1.8

10 vendor/github.com/felixge/httpsnoop/Makefile generated vendored Normal file
@@ -0,0 +1,10 @@
.PHONY: ci generate clean

ci: clean generate
	go test -v ./...

generate:
	go generate .

clean:
	rm -rf *_generated*.go
94 vendor/github.com/felixge/httpsnoop/README.md generated vendored Normal file
@@ -0,0 +1,94 @@
# httpsnoop

Package httpsnoop provides an easy way to capture http related metrics (i.e.
response time, bytes written, and http status code) from your application's
http.Handlers.

Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.

[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop)
[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop)

## Usage Example

```go
// myH is your app's http handler, perhaps a http.ServeMux or similar.
var myH http.Handler
// wrappedH wraps myH in order to log every request.
wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    m := httpsnoop.CaptureMetrics(myH, w, r)
    log.Printf(
        "%s %s (code=%d dt=%s written=%d)",
        r.Method,
        r.URL,
        m.Code,
        m.Duration,
        m.Written,
    )
})
http.ListenAndServe(":8080", wrappedH)
```

## Why this package exists

Instrumenting an application's http.Handler is surprisingly difficult.

However if you google for e.g. "capture ResponseWriter status code" you'll find
lots of advice and code examples that suggest it to be a fairly trivial
undertaking. Unfortunately everything I've seen so far has a high chance of
breaking your application.

The main problem is that a `http.ResponseWriter` often implements additional
interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
in your own struct that also implements the `http.ResponseWriter` interface
will hide the additional interfaces mentioned above. This has a high chance of
introducing subtle bugs into any non-trivial application.

Another approach I've seen people take is to return a struct that implements
all of the interfaces above. However, that's also problematic, because it's
difficult to fake some of these interfaces' behaviors when the underlying
`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
because an application may choose to operate differently, merely because it
detects the presence of these additional interfaces.

This package solves this problem by checking which additional interfaces a
`http.ResponseWriter` implements, returning a wrapped version implementing the
exact same set of interfaces.

Additionally this package properly handles edge cases such as `WriteHeader` not
being called, or called more than once, as well as concurrent calls to
`http.ResponseWriter` methods, and even calls happening after the wrapped
`ServeHTTP` has already returned.

Unfortunately this package is not perfect either. It's possible that it is
still missing some interfaces provided by the go core (let me know if you find
one), and it won't work for applications adding their own interfaces into the
mix.

However, hopefully the explanation above has sufficiently scared you off rolling
your own solution to this problem. httpsnoop may still break your application,
but at least it tries to avoid it as much as possible.

Anyway, the real problem here is that smuggling additional interfaces inside
`http.ResponseWriter` is a problematic design choice, but it probably goes as
deep as the Go language specification itself. But that's okay, I still prefer
Go over the alternatives ;).

## Performance

```
BenchmarkBaseline-8          20000   94912 ns/op
BenchmarkCaptureMetrics-8    20000   95461 ns/op
```

As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
overhead of ~500 ns per http request on my machine. However, the margin of
error appears to be larger than that, therefore it should be reasonable to
assume that the overhead introduced by `CaptureMetrics` is absolutely
negligible.

## License

MIT
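The "Why this package exists" section above describes how a hand-rolled wrapper hides the optional interfaces of the underlying writer. A stdlib-only sketch (an illustration, not httpsnoop code; the type and handler below are made up) makes the failure mode concrete: after wrapping, the handler can no longer see http.Flusher even though the wrapped writer implements it.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// naiveWriter is the kind of wrapper the README warns about: it satisfies
// http.ResponseWriter but hides every optional interface of the wrapped value.
type naiveWriter struct {
	http.ResponseWriter
	status int
}

func (w *naiveWriter) WriteHeader(code int) {
	w.status = code
	w.ResponseWriter.WriteHeader(code)
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, canFlush := w.(http.Flusher)
		fmt.Println("handler sees http.Flusher:", canFlush)
	})

	rec := httptest.NewRecorder() // *httptest.ResponseRecorder implements http.Flusher
	req := httptest.NewRequest("GET", "/", nil)

	h.ServeHTTP(rec, req)                               // prints true: the recorder itself can flush
	h.ServeHTTP(&naiveWriter{ResponseWriter: rec}, req) // prints false: the wrapper hid it
}
```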
20 vendor/github.com/ghodss/yaml/.gitignore generated vendored Normal file
@@ -0,0 +1,20 @@
# OSX leaves these everywhere on SMB shares
._*

# Eclipse files
.classpath
.project
.settings/**

# Emacs save files
*~

# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist

# Go test binaries
*.test

7 vendor/github.com/ghodss/yaml/.travis.yml generated vendored Normal file
@@ -0,0 +1,7 @@
language: go
go:
- 1.3
- 1.4
script:
- go test
- go build
120 vendor/github.com/ghodss/yaml/README.md generated vendored Normal file
@@ -0,0 +1,120 @@
# YAML marshaling and unmarshaling support for Go

[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)

## Introduction

A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.

In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).

## Compatibility

This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).

## Caveats

**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, candiedyaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:

```
BAD:
exampleKey: !!binary gIGC

GOOD:
exampleKey: gIGC
... and decode the base64 data in your code.
```

**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.

## Installation and usage

To install, run:

```
$ go get github.com/ghodss/yaml
```

And import using:

```
import "github.com/ghodss/yaml"
```

Usage is very similar to the JSON library:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Person struct {
	Name string `json:"name"` // Affects YAML field names too.
	Age  int    `json:"age"`
}

func main() {
	// Marshal a Person struct to YAML.
	p := Person{"John", 30}
	y, err := yaml.Marshal(p)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(y))
	/* Output:
	age: 30
	name: John
	*/

	// Unmarshal the YAML back into a Person struct.
	var p2 Person
	err = yaml.Unmarshal(y, &p2)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(p2)
	/* Output:
	{John 30}
	*/
}
```

`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	j := []byte(`{"name": "John", "age": 30}`)
	y, err := yaml.JSONToYAML(j)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(y))
	/* Output:
	name: John
	age: 30
	*/
	j2, err := yaml.YAMLToJSON(y)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(j2))
	/* Output:
	{"age":30,"name":"John"}
	*/
}
```
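Caveat #2 above is easy to trip over with YAML's complex-key syntax. A short sketch of what that failure looks like, assuming the behavior the README describes (the sample document is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// A YAML mapping whose key is itself a mapping ("? ..." introduces a complex key).
	doc := []byte("? {a: 1}\n: value\n")

	if _, err := yaml.YAMLToJSON(doc); err != nil {
		// Per Caveat #2, JSON cannot represent non-string map keys, so this should error.
		fmt.Println("YAMLToJSON failed as expected:", err)
		return
	}
	fmt.Println("unexpectedly succeeded")
}
```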
3 vendor/github.com/golang/protobuf/AUTHORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3 vendor/github.com/golang/protobuf/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
43
vendor/github.com/golang/protobuf/proto/Makefile
generated
vendored
Normal file
43
vendor/github.com/golang/protobuf/proto/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,43 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
install:
|
||||||
|
go install
|
||||||
|
|
||||||
|
test: install generate-test-pbs
|
||||||
|
go test
|
||||||
|
|
||||||
|
|
||||||
|
generate-test-pbs:
|
||||||
|
make install
|
||||||
|
make -C testdata
|
||||||
|
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
|
||||||
|
make
|
33
vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
generated
vendored
Normal file
33
vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
test:
|
||||||
|
cd testdata && make test
|
37
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
generated
vendored
Normal file
37
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
|
||||||
|
# at src/google/protobuf/descriptor.proto
|
||||||
|
regenerate:
|
||||||
|
@echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
|
||||||
|
cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto .
|
||||||
|
protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto
|
849
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
generated
vendored
Normal file
849
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
generated
vendored
Normal file
|
@ -0,0 +1,849 @@
|
||||||
|
// Protocol Buffers - Google's data interchange format
|
||||||
|
// Copyright 2008 Google Inc. All rights reserved.
|
||||||
|
// https://developers.google.com/protocol-buffers/
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
// Author: kenton@google.com (Kenton Varda)
|
||||||
|
// Based on original Protocol Buffers design by
|
||||||
|
// Sanjay Ghemawat, Jeff Dean, and others.
|
||||||
|
//
|
||||||
|
// The messages in this file describe the definitions found in .proto files.
|
||||||
|
// A valid .proto file can be translated directly to a FileDescriptorProto
|
||||||
|
// without any other information (e.g. without reading its imports).
|
||||||
|
|
||||||
|
|
||||||
|
syntax = "proto2";
|
||||||
|
|
||||||
|
package google.protobuf;
|
||||||
|
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
|
||||||
|
option java_package = "com.google.protobuf";
|
||||||
|
option java_outer_classname = "DescriptorProtos";
|
||||||
|
option csharp_namespace = "Google.Protobuf.Reflection";
|
||||||
|
option objc_class_prefix = "GPB";
|
||||||
|
|
||||||
|
// descriptor.proto must be optimized for speed because reflection-based
|
||||||
|
// algorithms don't work during bootstrapping.
|
||||||
|
option optimize_for = SPEED;
|
||||||
|
|
||||||
|
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
||||||
|
// files it parses.
|
||||||
|
message FileDescriptorSet {
|
||||||
|
repeated FileDescriptorProto file = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes a complete .proto file.
|
||||||
|
message FileDescriptorProto {
|
||||||
|
optional string name = 1; // file name, relative to root of source tree
|
||||||
|
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
||||||
|
|
||||||
|
// Names of files imported by this file.
|
||||||
|
repeated string dependency = 3;
|
||||||
|
// Indexes of the public imported files in the dependency list above.
|
||||||
|
repeated int32 public_dependency = 10;
|
||||||
|
// Indexes of the weak imported files in the dependency list.
|
||||||
|
// For Google-internal migration only. Do not use.
|
||||||
|
repeated int32 weak_dependency = 11;
|
||||||
|
|
||||||
|
// All top-level definitions in this file.
|
||||||
|
repeated DescriptorProto message_type = 4;
|
||||||
|
repeated EnumDescriptorProto enum_type = 5;
|
||||||
|
repeated ServiceDescriptorProto service = 6;
|
||||||
|
repeated FieldDescriptorProto extension = 7;
|
||||||
|
|
||||||
|
optional FileOptions options = 8;
|
||||||
|
|
||||||
|
// This field contains optional information about the original source code.
|
||||||
|
// You may safely remove this entire field without harming runtime
|
||||||
|
// functionality of the descriptors -- the information is needed only by
|
||||||
|
// development tools.
|
||||||
|
optional SourceCodeInfo source_code_info = 9;
|
||||||
|
|
||||||
|
// The syntax of the proto file.
|
||||||
|
// The supported values are "proto2" and "proto3".
|
||||||
|
optional string syntax = 12;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes a message type.
|
||||||
|
message DescriptorProto {
|
||||||
|
optional string name = 1;
|
||||||
|
|
||||||
|
repeated FieldDescriptorProto field = 2;
|
||||||
|
repeated FieldDescriptorProto extension = 6;
|
||||||
|
|
||||||
|
repeated DescriptorProto nested_type = 3;
|
||||||
|
repeated EnumDescriptorProto enum_type = 4;
|
||||||
|
|
||||||
|
message ExtensionRange {
|
||||||
|
optional int32 start = 1;
|
||||||
|
optional int32 end = 2;
|
||||||
|
|
||||||
|
optional ExtensionRangeOptions options = 3;
|
||||||
|
}
|
||||||
|
repeated ExtensionRange extension_range = 5;
|
||||||
|
|
||||||
|
repeated OneofDescriptorProto oneof_decl = 8;
|
||||||
|
|
||||||
|
optional MessageOptions options = 7;
|
||||||
|
|
||||||
|
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
||||||
|
// fields or extension ranges in the same message. Reserved ranges may
|
||||||
|
// not overlap.
|
||||||
|
message ReservedRange {
|
||||||
|
optional int32 start = 1; // Inclusive.
|
||||||
|
optional int32 end = 2; // Exclusive.
|
||||||
|
}
|
||||||
|
repeated ReservedRange reserved_range = 9;
|
||||||
|
// Reserved field names, which may not be used by fields in the same message.
|
||||||
|
// A given name may only be reserved once.
|
||||||
|
repeated string reserved_name = 10;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ExtensionRangeOptions {
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes a field within a message.
|
||||||
|
message FieldDescriptorProto {
|
||||||
|
enum Type {
|
||||||
|
// 0 is reserved for errors.
|
||||||
|
// Order is weird for historical reasons.
|
||||||
|
TYPE_DOUBLE = 1;
|
||||||
|
TYPE_FLOAT = 2;
|
||||||
|
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
|
||||||
|
// negative values are likely.
|
||||||
|
TYPE_INT64 = 3;
|
||||||
|
TYPE_UINT64 = 4;
|
||||||
|
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
|
||||||
|
// negative values are likely.
|
||||||
|
TYPE_INT32 = 5;
|
||||||
|
TYPE_FIXED64 = 6;
|
||||||
|
TYPE_FIXED32 = 7;
|
||||||
|
TYPE_BOOL = 8;
|
||||||
|
TYPE_STRING = 9;
|
||||||
|
// Tag-delimited aggregate.
|
||||||
|
// Group type is deprecated and not supported in proto3. However, Proto3
|
||||||
|
// implementations should still be able to parse the group wire format and
|
||||||
|
// treat group fields as unknown fields.
|
||||||
|
TYPE_GROUP = 10;
|
||||||
|
TYPE_MESSAGE = 11; // Length-delimited aggregate.
|
||||||
|
|
||||||
|
// New in version 2.
|
||||||
|
TYPE_BYTES = 12;
|
||||||
|
TYPE_UINT32 = 13;
|
||||||
|
TYPE_ENUM = 14;
|
||||||
|
TYPE_SFIXED32 = 15;
|
||||||
|
TYPE_SFIXED64 = 16;
|
||||||
|
TYPE_SINT32 = 17; // Uses ZigZag encoding.
|
||||||
|
TYPE_SINT64 = 18; // Uses ZigZag encoding.
|
||||||
|
};
|
||||||
|
|
||||||
|
enum Label {
|
||||||
|
// 0 is reserved for errors
|
||||||
|
LABEL_OPTIONAL = 1;
|
||||||
|
LABEL_REQUIRED = 2;
|
||||||
|
LABEL_REPEATED = 3;
|
||||||
|
};
|
||||||
|
|
||||||
|
optional string name = 1;
|
||||||
|
optional int32 number = 3;
|
||||||
|
optional Label label = 4;
|
||||||
|
|
||||||
|
// If type_name is set, this need not be set. If both this and type_name
|
||||||
|
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
|
||||||
|
optional Type type = 5;
|
||||||
|
|
||||||
|
// For message and enum types, this is the name of the type. If the name
|
||||||
|
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
|
||||||
|
// rules are used to find the type (i.e. first the nested types within this
|
||||||
|
// message are searched, then within the parent, on up to the root
|
||||||
|
// namespace).
|
||||||
|
optional string type_name = 6;
|
||||||
|
|
||||||
|
// For extensions, this is the name of the type being extended. It is
|
||||||
|
// resolved in the same manner as type_name.
|
||||||
|
optional string extendee = 2;
|
||||||
|
|
||||||
|
// For numeric types, contains the original text representation of the value.
|
||||||
|
// For booleans, "true" or "false".
|
||||||
|
// For strings, contains the default text contents (not escaped in any way).
|
||||||
|
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
|
||||||
|
// TODO(kenton): Base-64 encode?
|
||||||
|
optional string default_value = 7;
|
||||||
|
|
||||||
|
// If set, gives the index of a oneof in the containing type's oneof_decl
|
||||||
|
// list. This field is a member of that oneof.
|
||||||
|
optional int32 oneof_index = 9;
|
||||||
|
|
||||||
|
// JSON name of this field. The value is set by protocol compiler. If the
|
||||||
|
// user has set a "json_name" option on this field, that option's value
|
||||||
|
// will be used. Otherwise, it's deduced from the field's name by converting
|
||||||
|
// it to camelCase.
|
||||||
|
optional string json_name = 10;
|
||||||
|
|
||||||
|
optional FieldOptions options = 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes a oneof.
|
||||||
|
message OneofDescriptorProto {
|
||||||
|
optional string name = 1;
|
||||||
|
optional OneofOptions options = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes an enum type.
|
||||||
|
message EnumDescriptorProto {
|
||||||
|
optional string name = 1;
|
||||||
|
|
||||||
|
repeated EnumValueDescriptorProto value = 2;
|
||||||
|
|
||||||
|
optional EnumOptions options = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes a value within an enum.
|
||||||
|
message EnumValueDescriptorProto {
|
||||||
|
optional string name = 1;
|
||||||
|
optional int32 number = 2;
|
||||||
|
|
||||||
|
optional EnumValueOptions options = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes a service.
|
||||||
|
message ServiceDescriptorProto {
|
||||||
|
optional string name = 1;
|
||||||
|
repeated MethodDescriptorProto method = 2;
|
||||||
|
|
||||||
|
optional ServiceOptions options = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes a method of a service.
|
||||||
|
message MethodDescriptorProto {
|
||||||
|
optional string name = 1;
|
||||||
|
|
||||||
|
// Input and output type names. These are resolved in the same way as
|
||||||
|
// FieldDescriptorProto.type_name, but must refer to a message type.
|
||||||
|
optional string input_type = 2;
|
||||||
|
optional string output_type = 3;
|
||||||
|
|
||||||
|
optional MethodOptions options = 4;
|
||||||
|
|
||||||
|
// Identifies if client streams multiple client messages
|
||||||
|
optional bool client_streaming = 5 [default=false];
|
||||||
|
// Identifies if server streams multiple server messages
|
||||||
|
optional bool server_streaming = 6 [default=false];
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ===================================================================
|
||||||
|
// Options
|
||||||
|
|
||||||
|
// Each of the definitions above may have "options" attached. These are
|
||||||
|
// just annotations which may cause code to be generated slightly differently
|
||||||
|
// or may contain hints for code that manipulates protocol messages.
|
||||||
|
//
|
||||||
|
// Clients may define custom options as extensions of the *Options messages.
|
||||||
|
// These extensions may not yet be known at parsing time, so the parser cannot
|
||||||
|
// store the values in them. Instead it stores them in a field in the *Options
|
||||||
|
// message called uninterpreted_option. This field must have the same name
|
||||||
|
// across all *Options messages. We then use this field to populate the
|
||||||
|
// extensions when we build a descriptor, at which point all protos have been
|
||||||
|
// parsed and so all extensions are known.
|
||||||
|
//
|
||||||
|
// Extension numbers for custom options may be chosen as follows:
|
||||||
|
// * For options which will only be used within a single application or
|
||||||
|
// organization, or for experimental options, use field numbers 50000
|
||||||
|
// through 99999. It is up to you to ensure that you do not use the
|
||||||
|
// same number for multiple options.
|
||||||
|
// * For options which will be published and used publicly by multiple
|
||||||
|
// independent entities, e-mail protobuf-global-extension-registry@google.com
|
||||||
|
// to reserve extension numbers. Simply provide your project name (e.g.
|
||||||
|
// Objective-C plugin) and your project website (if available) -- there's no
|
||||||
|
// need to explain how you intend to use them. Usually you only need one
|
||||||
|
// extension number. You can declare multiple options with only one extension
|
||||||
|
// number by putting them in a sub-message. See the Custom Options section of
|
||||||
|
// the docs for examples:
|
||||||
|
// https://developers.google.com/protocol-buffers/docs/proto#options
|
||||||
|
// If this turns out to be popular, a web service will be set up
|
||||||
|
// to automatically assign option numbers.
|
||||||
|
|
||||||
|
|
||||||
|
message FileOptions {
|
||||||
|
|
||||||
|
// Sets the Java package where classes generated from this .proto will be
|
||||||
|
// placed. By default, the proto package is used, but this is often
|
||||||
|
// inappropriate because proto packages do not normally start with backwards
|
||||||
|
// domain names.
|
||||||
|
optional string java_package = 1;
|
||||||
|
|
||||||
|
|
||||||
|
// If set, all the classes from the .proto file are wrapped in a single
|
||||||
|
// outer class with the given name. This applies to both Proto1
|
||||||
|
// (equivalent to the old "--one_java_file" option) and Proto2 (where
|
||||||
|
// a .proto always translates to a single class, but you may want to
|
||||||
|
// explicitly choose the class name).
|
||||||
|
optional string java_outer_classname = 8;
|
||||||
|
|
||||||
|
// If set true, then the Java code generator will generate a separate .java
|
||||||
|
// file for each top-level message, enum, and service defined in the .proto
|
||||||
|
// file. Thus, these types will *not* be nested inside the outer class
|
||||||
|
// named by java_outer_classname. However, the outer class will still be
|
||||||
|
// generated to contain the file's getDescriptor() method as well as any
|
||||||
|
// top-level extensions defined in the file.
|
||||||
|
optional bool java_multiple_files = 10 [default=false];
|
||||||
|
|
||||||
|
// This option does nothing.
|
||||||
|
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
|
||||||
|
|
||||||
|
// If set true, then the Java2 code generator will generate code that
|
||||||
|
// throws an exception whenever an attempt is made to assign a non-UTF-8
|
||||||
|
// byte sequence to a string field.
|
||||||
|
// Message reflection will do the same.
|
||||||
|
// However, an extension field still accepts non-UTF-8 byte sequences.
|
||||||
|
// This option has no effect when used with the lite runtime.
|
||||||
|
optional bool java_string_check_utf8 = 27 [default=false];
|
||||||
|
|
||||||
|
|
||||||
|
// Generated classes can be optimized for speed or code size.
|
||||||
|
enum OptimizeMode {
|
||||||
|
SPEED = 1; // Generate complete code for parsing, serialization,
|
||||||
|
// etc.
|
||||||
|
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
|
||||||
|
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
|
||||||
|
}
|
||||||
|
optional OptimizeMode optimize_for = 9 [default=SPEED];
|
||||||
|
|
||||||
|
// Sets the Go package where structs generated from this .proto will be
|
||||||
|
// placed. If omitted, the Go package will be derived from the following:
|
||||||
|
// - The basename of the package import path, if provided.
|
||||||
|
// - Otherwise, the package statement in the .proto file, if present.
|
||||||
|
// - Otherwise, the basename of the .proto file, without extension.
|
||||||
|
optional string go_package = 11;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
// Should generic services be generated in each language? "Generic" services
|
||||||
|
// are not specific to any particular RPC system. They are generated by the
|
||||||
|
// main code generators in each language (without additional plugins).
|
||||||
|
// Generic services were the only kind of service generation supported by
|
||||||
|
// early versions of google.protobuf.
|
||||||
|
//
|
||||||
|
// Generic services are now considered deprecated in favor of using plugins
|
||||||
|
// that generate code specific to your particular RPC system. Therefore,
|
||||||
|
// these default to false. Old code which depends on generic services should
|
||||||
|
// explicitly set them to true.
|
||||||
|
optional bool cc_generic_services = 16 [default=false];
|
||||||
|
optional bool java_generic_services = 17 [default=false];
|
||||||
|
optional bool py_generic_services = 18 [default=false];
|
||||||
|
optional bool php_generic_services = 42 [default=false];
|
||||||
|
|
||||||
|
// Is this file deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for everything in the file, or it will be completely ignored; at the very
|
||||||
|
// least, this is a formalization for deprecating files.
|
||||||
|
optional bool deprecated = 23 [default=false];
|
||||||
|
|
||||||
|
// Enables the use of arenas for the proto messages in this file. This applies
|
||||||
|
// only to generated classes for C++.
|
||||||
|
optional bool cc_enable_arenas = 31 [default=false];
|
||||||
|
|
||||||
|
|
||||||
|
// Sets the objective c class prefix which is prepended to all objective c
|
||||||
|
// generated classes from this .proto. There is no default.
|
||||||
|
optional string objc_class_prefix = 36;
|
||||||
|
|
||||||
|
// Namespace for generated classes; defaults to the package.
|
||||||
|
optional string csharp_namespace = 37;
|
||||||
|
|
||||||
|
// By default Swift generators will take the proto package and CamelCase it
|
||||||
|
// replacing '.' with underscore and use that to prefix the types/symbols
|
||||||
|
// defined. When this option is provided, they will use this value instead
|
||||||
|
// to prefix the types/symbols defined.
|
||||||
|
optional string swift_prefix = 39;
|
||||||
|
|
||||||
|
// Sets the php class prefix which is prepended to all php generated classes
|
||||||
|
// from this .proto. Default is empty.
|
||||||
|
optional string php_class_prefix = 40;
|
||||||
|
|
||||||
|
// Use this option to change the namespace of php generated classes. Default
|
||||||
|
// is empty. When this option is empty, the package name will be used for
|
||||||
|
// determining the namespace.
|
||||||
|
optional string php_namespace = 41;
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
|
||||||
|
reserved 38;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MessageOptions {
|
||||||
|
// Set true to use the old proto1 MessageSet wire format for extensions.
|
||||||
|
// This is provided for backwards-compatibility with the MessageSet wire
|
||||||
|
// format. You should not use this for any other reason: It's less
|
||||||
|
// efficient, has fewer features, and is more complicated.
|
||||||
|
//
|
||||||
|
// The message must be defined exactly as follows:
|
||||||
|
// message Foo {
|
||||||
|
// option message_set_wire_format = true;
|
||||||
|
// extensions 4 to max;
|
||||||
|
// }
|
||||||
|
// Note that the message cannot have any defined fields; MessageSets only
|
||||||
|
// have extensions.
|
||||||
|
//
|
||||||
|
// All extensions of your type must be singular messages; e.g. they cannot
|
||||||
|
// be int32s, enums, or repeated messages.
|
||||||
|
//
|
||||||
|
// Because this is an option, the above two restrictions are not enforced by
|
||||||
|
// the protocol compiler.
|
||||||
|
optional bool message_set_wire_format = 1 [default=false];
|
||||||
|
|
||||||
|
// Disables the generation of the standard "descriptor()" accessor, which can
|
||||||
|
// conflict with a field of the same name. This is meant to make migration
|
||||||
|
// from proto1 easier; new code should avoid fields named "descriptor".
|
||||||
|
optional bool no_standard_descriptor_accessor = 2 [default=false];
|
||||||
|
|
||||||
|
// Is this message deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the message, or it will be completely ignored; at the very least,
|
||||||
|
// this is a formalization for deprecating messages.
|
||||||
|
optional bool deprecated = 3 [default=false];
|
||||||
|
|
||||||
|
// Whether the message is an automatically generated map entry type for the
|
||||||
|
// maps field.
|
||||||
|
//
|
||||||
|
// For maps fields:
|
||||||
|
// map<KeyType, ValueType> map_field = 1;
|
||||||
|
// The parsed descriptor looks like:
|
||||||
|
// message MapFieldEntry {
|
||||||
|
// option map_entry = true;
|
||||||
|
// optional KeyType key = 1;
|
||||||
|
// optional ValueType value = 2;
|
||||||
|
// }
|
||||||
|
// repeated MapFieldEntry map_field = 1;
|
||||||
|
//
|
||||||
|
// Implementations may choose not to generate the map_entry=true message, but
|
||||||
|
// use a native map in the target language to hold the keys and values.
|
||||||
|
// The reflection APIs in such implementations still need to work as
|
||||||
|
// if the field is a repeated message field.
|
||||||
|
//
|
||||||
|
// NOTE: Do not set the option in .proto files. Always use the maps syntax
|
||||||
|
// instead. The option should only be implicitly set by the proto compiler
|
||||||
|
// parser.
|
||||||
|
optional bool map_entry = 7;
|
||||||
|
|
||||||
|
reserved 8; // javalite_serializable
|
||||||
|
reserved 9; // javanano_as_lite
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FieldOptions {
|
||||||
|
// The ctype option instructs the C++ code generator to use a different
|
||||||
|
// representation of the field than it normally would. See the specific
|
||||||
|
// options below. This option is not yet implemented in the open source
|
||||||
|
// release -- sorry, we'll try to include it in a future version!
|
||||||
|
optional CType ctype = 1 [default = STRING];
|
||||||
|
enum CType {
|
||||||
|
// Default mode.
|
||||||
|
STRING = 0;
|
||||||
|
|
||||||
|
CORD = 1;
|
||||||
|
|
||||||
|
STRING_PIECE = 2;
|
||||||
|
}
|
||||||
|
// The packed option can be enabled for repeated primitive fields to enable
|
||||||
|
// a more efficient representation on the wire. Rather than repeatedly
|
||||||
|
// writing the tag and type for each element, the entire array is encoded as
|
||||||
|
// a single length-delimited blob. In proto3, only explicitly setting it to
|
||||||
|
// false will avoid using packed encoding.
|
||||||
|
optional bool packed = 2;
|
||||||
|
|
||||||
|
// The jstype option determines the JavaScript type used for values of the
|
||||||
|
// field. The option is permitted only for 64 bit integral and fixed types
|
||||||
|
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
|
||||||
|
// is represented as JavaScript string, which avoids loss of precision that
|
||||||
|
// can happen when a large value is converted to a floating point JavaScript number.
|
||||||
|
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
|
||||||
|
// use the JavaScript "number" type. The behavior of the default option
|
||||||
|
// JS_NORMAL is implementation dependent.
|
||||||
|
//
|
||||||
|
// This option is an enum to permit additional types to be added, e.g.
|
||||||
|
// goog.math.Integer.
|
||||||
|
optional JSType jstype = 6 [default = JS_NORMAL];
|
||||||
|
enum JSType {
|
||||||
|
// Use the default type.
|
||||||
|
JS_NORMAL = 0;
|
||||||
|
|
||||||
|
// Use JavaScript strings.
|
||||||
|
JS_STRING = 1;
|
||||||
|
|
||||||
|
// Use JavaScript numbers.
|
||||||
|
JS_NUMBER = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should this field be parsed lazily? Lazy applies only to message-type
|
||||||
|
// fields. It means that when the outer message is initially parsed, the
|
||||||
|
// inner message's contents will not be parsed but instead stored in encoded
|
||||||
|
// form. The inner message will actually be parsed when it is first accessed.
|
||||||
|
//
|
||||||
|
// This is only a hint. Implementations are free to choose whether to use
|
||||||
|
// eager or lazy parsing regardless of the value of this option. However,
|
||||||
|
// setting this option true suggests that the protocol author believes that
|
||||||
|
// using lazy parsing on this field is worth the additional bookkeeping
|
||||||
|
// overhead typically needed to implement it.
|
||||||
|
//
|
||||||
|
// This option does not affect the public interface of any generated code;
|
||||||
|
// all method signatures remain the same. Furthermore, thread-safety of the
|
||||||
|
// interface is not affected by this option; const methods remain safe to
|
||||||
|
// call from multiple threads concurrently, while non-const methods continue
|
||||||
|
// to require exclusive access.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Note that implementations may choose not to check required fields within
|
||||||
|
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
||||||
|
// may return true even if the inner message has missing required fields.
|
||||||
|
// This is necessary because otherwise the inner message would have to be
|
||||||
|
// parsed in order to perform the check, defeating the purpose of lazy
|
||||||
|
// parsing. An implementation which chooses not to check required fields
|
||||||
|
// must be consistent about it. That is, for any particular sub-message, the
|
||||||
|
// implementation must either *always* check its required fields, or *never*
|
||||||
|
// check its required fields, regardless of whether or not the message has
|
||||||
|
// been parsed.
|
||||||
|
optional bool lazy = 5 [default=false];
|
||||||
|
|
||||||
|
// Is this field deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for accessors, or it will be completely ignored; at the very least, this
|
||||||
|
// is a formalization for deprecating fields.
|
||||||
|
optional bool deprecated = 3 [default=false];
|
||||||
|
|
||||||
|
// For Google-internal migration only. Do not use.
|
||||||
|
optional bool weak = 10 [default=false];
|
||||||
|
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
|
||||||
|
reserved 4; // removed jtype
|
||||||
|
}
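The packed behaviour documented above is easy to observe with the vendored Go library. The sketch below (assuming the github.com/golang/protobuf packages from this vendor tree) marshals a SourceCodeInfo.Location, whose path field is declared [packed=true], and prints the wire bytes: the whole repeated field comes out as one tag byte and one length byte followed by the varint elements.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// SourceCodeInfo.Location.path is `repeated int32 path = 1 [packed=true]`,
	// so the whole slice is encoded as a single length-delimited blob.
	loc := &descriptor.SourceCodeInfo_Location{Path: []int32{4, 3, 2, 7, 1}}

	b, err := proto.Marshal(loc)
	if err != nil {
		log.Fatal(err)
	}
	// One tag byte and one length byte, then the varint elements --
	// rather than a separate tag for every element.
	fmt.Printf("% x\n", b) // expected: 0a 05 04 03 02 07 01
}
```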
|
||||||
|
|
||||||
|
message OneofOptions {
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EnumOptions {
|
||||||
|
|
||||||
|
// Set this option to true to allow mapping different tag names to the same
|
||||||
|
// value.
|
||||||
|
optional bool allow_alias = 2;
|
||||||
|
|
||||||
|
// Is this enum deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the enum, or it will be completely ignored; at the very least, this
|
||||||
|
// is a formalization for deprecating enums.
|
||||||
|
optional bool deprecated = 3 [default=false];
|
||||||
|
|
||||||
|
reserved 5; // javanano_as_lite
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EnumValueOptions {
|
||||||
|
// Is this enum value deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the enum value, or it will be completely ignored; at the very least,
|
||||||
|
// this is a formalization for deprecating enum values.
|
||||||
|
optional bool deprecated = 1 [default=false];
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ServiceOptions {
|
||||||
|
|
||||||
|
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||||
|
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||||
|
// we were already using them long before we decided to release Protocol
|
||||||
|
// Buffers.
|
||||||
|
|
||||||
|
// Is this service deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the service, or it will be completely ignored; at the very least,
|
||||||
|
// this is a formalization for deprecating services.
|
||||||
|
optional bool deprecated = 33 [default=false];
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MethodOptions {
|
||||||
|
|
||||||
|
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||||
|
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||||
|
// we were already using them long before we decided to release Protocol
|
||||||
|
// Buffers.
|
||||||
|
|
||||||
|
// Is this method deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the method, or it will be completely ignored; at the very least,
|
||||||
|
// this is a formalization for deprecating methods.
|
||||||
|
optional bool deprecated = 33 [default=false];
|
||||||
|
|
||||||
|
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
|
||||||
|
// or neither? HTTP based RPC implementation may choose GET verb for safe
|
||||||
|
// methods, and PUT verb for idempotent methods instead of the default POST.
|
||||||
|
enum IdempotencyLevel {
|
||||||
|
IDEMPOTENCY_UNKNOWN = 0;
|
||||||
|
NO_SIDE_EFFECTS = 1; // implies idempotent
|
||||||
|
IDEMPOTENT = 2; // idempotent, but may have side effects
|
||||||
|
}
|
||||||
|
optional IdempotencyLevel idempotency_level =
|
||||||
|
34 [default=IDEMPOTENCY_UNKNOWN];
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// A message representing an option the parser does not recognize. This only
|
||||||
|
// appears in options protos created by the compiler::Parser class.
|
||||||
|
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
||||||
|
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
||||||
|
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
||||||
|
// in them.
|
||||||
|
message UninterpretedOption {
|
||||||
|
// The name of the uninterpreted option. Each string represents a segment in
|
||||||
|
// a dot-separated name. is_extension is true iff a segment represents an
|
||||||
|
// extension (denoted with parentheses in options specs in .proto files).
|
||||||
|
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
||||||
|
// "foo.(bar.baz).qux".
|
||||||
|
message NamePart {
|
||||||
|
required string name_part = 1;
|
||||||
|
required bool is_extension = 2;
|
||||||
|
}
|
||||||
|
repeated NamePart name = 2;
|
||||||
|
|
||||||
|
// The value of the uninterpreted option, in whatever type the tokenizer
|
||||||
|
// identified it as during parsing. Exactly one of these should be set.
|
||||||
|
optional string identifier_value = 3;
|
||||||
|
optional uint64 positive_int_value = 4;
|
||||||
|
optional int64 negative_int_value = 5;
|
||||||
|
optional double double_value = 6;
|
||||||
|
optional bytes string_value = 7;
|
||||||
|
optional string aggregate_value = 8;
|
||||||
|
}
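As a rough illustration of the NamePart encoding described above, the following Go sketch (the optionName helper is ours, not part of the library) rebuilds the dotted option name, wrapping extension segments in parentheses:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// optionName joins UninterpretedOption name parts back into the form used in
// .proto files, e.g. "foo.(bar.baz).qux" for the example in the comment above.
func optionName(parts []*descriptor.UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")")
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	name := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	fmt.Println(optionName(name)) // foo.(bar.baz).qux
}
```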
|
||||||
|
|
||||||
|
// ===================================================================
|
||||||
|
// Optional source code info
|
||||||
|
|
||||||
|
// Encapsulates information about the original source file from which a
|
||||||
|
// FileDescriptorProto was generated.
|
||||||
|
message SourceCodeInfo {
|
||||||
|
// A Location identifies a piece of source code in a .proto file which
|
||||||
|
// corresponds to a particular definition. This information is intended
|
||||||
|
// to be useful to IDEs, code indexers, documentation generators, and similar
|
||||||
|
// tools.
|
||||||
|
//
|
||||||
|
// For example, say we have a file like:
//   message Foo {
//     optional string foo = 1;
//   }
// Let's look at just the field definition:
//   optional string foo = 1;
//   ^       ^^     ^^  ^  ^^^
//   a       bc     de  f  ghi
// We have the following locations:
//   span   path               represents
//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
|
||||||
|
//
|
||||||
|
// Notes:
|
||||||
|
// - A location may refer to a repeated field itself (i.e. not to any
|
||||||
|
// particular index within it). This is used whenever a set of elements are
|
||||||
|
// logically enclosed in a single code segment. For example, an entire
|
||||||
|
// extend block (possibly containing multiple extension definitions) will
|
||||||
|
// have an outer location whose path refers to the "extensions" repeated
|
||||||
|
// field without an index.
|
||||||
|
// - Multiple locations may have the same path. This happens when a single
|
||||||
|
// logical declaration is spread out across multiple places. The most
|
||||||
|
// obvious example is the "extend" block again -- there may be multiple
|
||||||
|
// extend blocks in the same scope, each of which will have the same path.
|
||||||
|
// - A location's span is not always a subset of its parent's span. For
|
||||||
|
// example, the "extendee" of an extension declaration appears at the
|
||||||
|
// beginning of the "extend" block and is shared by all extensions within
|
||||||
|
// the block.
|
||||||
|
// - Just because a location's span is a subset of some other location's span
|
||||||
|
// does not mean that it is a descendent. For example, a "group" defines
|
||||||
|
// both a type and a field in a single declaration. Thus, the locations
|
||||||
|
// corresponding to the type and field and their components will overlap.
|
||||||
|
// - Code which tries to interpret locations should probably be designed to
|
||||||
|
// ignore those that it doesn't understand, as more types of locations could
|
||||||
|
// be recorded in the future.
|
||||||
|
repeated Location location = 1;
|
||||||
|
message Location {
|
||||||
|
// Identifies which part of the FileDescriptorProto was defined at this
|
||||||
|
// location.
|
||||||
|
//
|
||||||
|
// Each element is a field number or an index. They form a path from
|
||||||
|
// the root FileDescriptorProto to the place where the definition appears. For
|
||||||
|
// example, this path:
|
||||||
|
// [ 4, 3, 2, 7, 1 ]
|
||||||
|
// refers to:
|
||||||
|
// file.message_type(3) // 4, 3
|
||||||
|
// .field(7) // 2, 7
|
||||||
|
// .name() // 1
|
||||||
|
// This is because FileDescriptorProto.message_type has field number 4:
|
||||||
|
// repeated DescriptorProto message_type = 4;
|
||||||
|
// and DescriptorProto.field has field number 2:
|
||||||
|
// repeated FieldDescriptorProto field = 2;
|
||||||
|
// and FieldDescriptorProto.name has field number 1:
|
||||||
|
// optional string name = 1;
|
||||||
|
//
|
||||||
|
// Thus, the above path gives the location of a field name. If we removed
|
||||||
|
// the last element:
|
||||||
|
// [ 4, 3, 2, 7 ]
|
||||||
|
// this path refers to the whole field declaration (from the beginning
|
||||||
|
// of the label to the terminating semicolon).
|
||||||
|
repeated int32 path = 1 [packed=true];
|
||||||
|
|
||||||
|
// Always has exactly three or four elements: start line, start column,
|
||||||
|
// end line (optional, otherwise assumed same as start line), end column.
|
||||||
|
// These are packed into a single field for efficiency. Note that line
|
||||||
|
// and column numbers are zero-based -- typically you will want to add
|
||||||
|
// 1 to each before displaying to a user.
|
||||||
|
repeated int32 span = 2 [packed=true];
|
||||||
|
|
||||||
|
// If this SourceCodeInfo represents a complete declaration, these are any
|
||||||
|
// comments appearing before and after the declaration which appear to be
|
||||||
|
// attached to the declaration.
|
||||||
|
//
|
||||||
|
// A series of line comments appearing on consecutive lines, with no other
|
||||||
|
// tokens appearing on those lines, will be treated as a single comment.
|
||||||
|
//
|
||||||
|
// leading_detached_comments will keep paragraphs of comments that appear
|
||||||
|
// before (but not connected to) the current element. Each paragraph,
|
||||||
|
// separated by empty lines, will be one comment element in the repeated
|
||||||
|
// field.
|
||||||
|
//
|
||||||
|
// Only the comment content is provided; comment markers (e.g. //) are
|
||||||
|
// stripped out. For block comments, leading whitespace and an asterisk
|
||||||
|
// will be stripped from the beginning of each line other than the first.
|
||||||
|
// Newlines are included in the output.
|
||||||
|
//
|
||||||
|
// Examples:
|
||||||
|
//
|
||||||
|
// optional int32 foo = 1; // Comment attached to foo.
|
||||||
|
// // Comment attached to bar.
|
||||||
|
// optional int32 bar = 2;
|
||||||
|
//
|
||||||
|
// optional string baz = 3;
|
||||||
|
// // Comment attached to baz.
|
||||||
|
// // Another line attached to baz.
|
||||||
|
//
|
||||||
|
// // Comment attached to qux.
|
||||||
|
// //
|
||||||
|
// // Another line attached to qux.
|
||||||
|
// optional double qux = 4;
|
||||||
|
//
|
||||||
|
// // Detached comment for corge. This is not leading or trailing comments
|
||||||
|
// // to qux or corge because there are blank lines separating it from
|
||||||
|
// // both.
|
||||||
|
//
|
||||||
|
// // Detached comment for corge paragraph 2.
|
||||||
|
//
|
||||||
|
// optional string corge = 5;
|
||||||
|
// /* Block comment attached
|
||||||
|
// * to corge. Leading asterisks
|
||||||
|
// * will be removed. */
|
||||||
|
// /* Block comment attached to
|
||||||
|
// * grault. */
|
||||||
|
// optional int32 grault = 6;
|
||||||
|
//
|
||||||
|
// // ignored detached comments.
|
||||||
|
optional string leading_comments = 3;
|
||||||
|
optional string trailing_comments = 4;
|
||||||
|
repeated string leading_detached_comments = 6;
|
||||||
|
}
|
||||||
|
}
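A hedged Go sketch of how a tool might consume this information, assuming it already holds a FileDescriptorProto produced with source info retained: it resolves the example path [4, 3, 2, 7, 1] by hand and then dumps every recorded location.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// dumpSourceInfo walks the locations recorded for one parsed .proto file.
// fd is assumed to have been produced with source info retained (for example
// by protoc with --include_source_info).
func dumpSourceInfo(fd *descriptor.FileDescriptorProto) {
	// The example path [4, 3, 2, 7, 1] from the comment above, resolved by hand:
	// field 4 (message_type), index 3; field 2 (field), index 7; field 1 (name).
	if mt := fd.GetMessageType(); len(mt) > 3 && len(mt[3].GetField()) > 7 {
		fmt.Println("name at [4,3,2,7,1]:", mt[3].GetField()[7].GetName())
	}

	for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
		// span holds zero-based [start line, start col, (end line,) end col].
		fmt.Printf("path=%v span=%v leading=%q\n",
			loc.GetPath(), loc.GetSpan(), loc.GetLeadingComments())
	}
}

func main() {
	// A real caller would pass a descriptor from a CodeGeneratorRequest or a
	// compiled descriptor set; an empty one just keeps the sketch runnable.
	dumpSourceInfo(&descriptor.FileDescriptorProto{})
}
```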
|
||||||
|
|
||||||
|
// Describes the relationship between generated code and its original source
|
||||||
|
// file. A GeneratedCodeInfo message is associated with only one generated
|
||||||
|
// source file, but may contain references to different source .proto files.
|
||||||
|
message GeneratedCodeInfo {
|
||||||
|
// An Annotation connects some span of text in generated code to an element
|
||||||
|
// of its generating .proto file.
|
||||||
|
repeated Annotation annotation = 1;
|
||||||
|
message Annotation {
|
||||||
|
// Identifies the element in the original source .proto file. This field
|
||||||
|
// is formatted the same as SourceCodeInfo.Location.path.
|
||||||
|
repeated int32 path = 1 [packed=true];
|
||||||
|
|
||||||
|
// Identifies the filesystem path to the original source .proto.
|
||||||
|
optional string source_file = 2;
|
||||||
|
|
||||||
|
// Identifies the starting offset in bytes in the generated code
|
||||||
|
// that relates to the identified object.
|
||||||
|
optional int32 begin = 3;
|
||||||
|
|
||||||
|
// Identifies the ending offset in bytes in the generated code that
|
||||||
|
// relates to the identified offset. The end offset should be one past
|
||||||
|
// the last relevant byte (so the length of the text = end - begin).
|
||||||
|
optional int32 end = 4;
|
||||||
|
}
|
||||||
|
}
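In the same spirit, a consumer of GeneratedCodeInfo can map byte ranges of a generated file back to the .proto elements they came from. A minimal sketch, with the input names being our own:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// printAnnotations shows, for each annotation, which bytes of the generated
// source relate to which path in which .proto file. generatedSrc is assumed
// to hold the full text of the generated file this GeneratedCodeInfo describes.
func printAnnotations(info *descriptor.GeneratedCodeInfo, generatedSrc string) {
	for _, a := range info.GetAnnotation() {
		begin, end := int(a.GetBegin()), int(a.GetEnd())
		if begin < 0 || end > len(generatedSrc) || begin > end {
			continue // skip annotations that do not fit the text we were given
		}
		fmt.Printf("%s %v -> %q\n", a.GetSourceFile(), a.GetPath(), generatedSrc[begin:end])
	}
}

func main() {
	printAnnotations(&descriptor.GeneratedCodeInfo{}, "")
}
```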
|
40
vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
include $(GOROOT)/src/Make.inc
|
||||||
|
|
||||||
|
TARG=github.com/golang/protobuf/compiler/generator
|
||||||
|
GOFILES=\
|
||||||
|
generator.go\
|
||||||
|
|
||||||
|
DEPS=../descriptor ../plugin ../../proto
|
||||||
|
|
||||||
|
include $(GOROOT)/src/Make.pkg
|
45
vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
# Not stored here, but plugin.proto is in https://github.com/google/protobuf/
|
||||||
|
# at src/google/protobuf/compiler/plugin.proto
|
||||||
|
# Also we need to fix an import.
|
||||||
|
regenerate:
|
||||||
|
@echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
|
||||||
|
cp $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto .
|
||||||
|
protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:../../../../.. \
|
||||||
|
-I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto
|
||||||
|
|
||||||
|
restore:
|
||||||
|
cp plugin.pb.golden plugin.pb.go
|
||||||
|
|
||||||
|
preserve:
|
||||||
|
cp plugin.pb.go plugin.pb.golden
|
83
vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
||||||
|
// Code generated by protoc-gen-go.
|
||||||
|
// source: google/protobuf/compiler/plugin.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package google_protobuf_compiler
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import "math"
|
||||||
|
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||||
|
|
||||||
|
// Reference proto and math imports to suppress error if they are not otherwise used.
|
||||||
|
var _ = proto.GetString
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
type CodeGeneratorRequest struct {
|
||||||
|
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
|
||||||
|
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
|
||||||
|
ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} }
|
||||||
|
func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
|
||||||
|
func (*CodeGeneratorRequest) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorRequest) GetParameter() string {
|
||||||
|
if this != nil && this.Parameter != nil {
|
||||||
|
return *this.Parameter
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type CodeGeneratorResponse struct {
|
||||||
|
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
||||||
|
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} }
|
||||||
|
func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
|
||||||
|
func (*CodeGeneratorResponse) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorResponse) GetError() string {
|
||||||
|
if this != nil && this.Error != nil {
|
||||||
|
return *this.Error
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type CodeGeneratorResponse_File struct {
|
||||||
|
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
|
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
|
||||||
|
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} }
|
||||||
|
func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
|
||||||
|
func (*CodeGeneratorResponse_File) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorResponse_File) GetName() string {
|
||||||
|
if this != nil && this.Name != nil {
|
||||||
|
return *this.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
|
||||||
|
if this != nil && this.InsertionPoint != nil {
|
||||||
|
return *this.InsertionPoint
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *CodeGeneratorResponse_File) GetContent() string {
|
||||||
|
if this != nil && this.Content != nil {
|
||||||
|
return *this.Content
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
}
|
167
vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
generated
vendored
Normal file
|
@ -0,0 +1,167 @@
|
||||||
|
// Protocol Buffers - Google's data interchange format
|
||||||
|
// Copyright 2008 Google Inc. All rights reserved.
|
||||||
|
// https://developers.google.com/protocol-buffers/
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
// Author: kenton@google.com (Kenton Varda)
|
||||||
|
//
|
||||||
|
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
|
||||||
|
// change.
|
||||||
|
//
|
||||||
|
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
|
||||||
|
// just a program that reads a CodeGeneratorRequest from stdin and writes a
|
||||||
|
// CodeGeneratorResponse to stdout.
|
||||||
|
//
|
||||||
|
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
|
||||||
|
// of dealing with the raw protocol defined here.
|
||||||
|
//
|
||||||
|
// A plugin executable needs only to be placed somewhere in the path. The
|
||||||
|
// plugin should be named "protoc-gen-$NAME", and will then be used when the
|
||||||
|
// flag "--${NAME}_out" is passed to protoc.
|
||||||
|
|
||||||
|
syntax = "proto2";
|
||||||
|
package google.protobuf.compiler;
|
||||||
|
option java_package = "com.google.protobuf.compiler";
|
||||||
|
option java_outer_classname = "PluginProtos";
|
||||||
|
|
||||||
|
option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
|
||||||
|
|
||||||
|
import "google/protobuf/descriptor.proto";
|
||||||
|
|
||||||
|
// The version number of protocol compiler.
|
||||||
|
message Version {
|
||||||
|
optional int32 major = 1;
|
||||||
|
optional int32 minor = 2;
|
||||||
|
optional int32 patch = 3;
|
||||||
|
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||||
|
// be empty for mainline stable releases.
|
||||||
|
optional string suffix = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
|
||||||
|
message CodeGeneratorRequest {
|
||||||
|
// The .proto files that were explicitly listed on the command-line. The
|
||||||
|
// code generator should generate code only for these files. Each file's
|
||||||
|
// descriptor will be included in proto_file, below.
|
||||||
|
repeated string file_to_generate = 1;
|
||||||
|
|
||||||
|
// The generator parameter passed on the command-line.
|
||||||
|
optional string parameter = 2;
|
||||||
|
|
||||||
|
// FileDescriptorProtos for all files in files_to_generate and everything
|
||||||
|
// they import. The files will appear in topological order, so each file
|
||||||
|
// appears before any file that imports it.
|
||||||
|
//
|
||||||
|
// protoc guarantees that all proto_files will be written after
|
||||||
|
// the fields above, even though this is not technically guaranteed by the
|
||||||
|
// protobuf wire format. This theoretically could allow a plugin to stream
|
||||||
|
// in the FileDescriptorProtos and handle them one by one rather than read
|
||||||
|
// the entire set into memory at once. However, as of this writing, this
|
||||||
|
// is not similarly optimized on protoc's end -- it will store all fields in
|
||||||
|
// memory at once before sending them to the plugin.
|
||||||
|
//
|
||||||
|
// Type names of fields and extensions in the FileDescriptorProto are always
|
||||||
|
// fully qualified.
|
||||||
|
repeated FileDescriptorProto proto_file = 15;
|
||||||
|
|
||||||
|
// The version number of protocol compiler.
|
||||||
|
optional Version compiler_version = 3;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// The plugin writes an encoded CodeGeneratorResponse to stdout.
|
||||||
|
message CodeGeneratorResponse {
|
||||||
|
// Error message. If non-empty, code generation failed. The plugin process
|
||||||
|
// should exit with status code zero even if it reports an error in this way.
|
||||||
|
//
|
||||||
|
// This should be used to indicate errors in .proto files which prevent the
|
||||||
|
// code generator from generating correct code. Errors which indicate a
|
||||||
|
// problem in protoc itself -- such as the input CodeGeneratorRequest being
|
||||||
|
// unparseable -- should be reported by writing a message to stderr and
|
||||||
|
// exiting with a non-zero status code.
|
||||||
|
optional string error = 1;
|
||||||
|
|
||||||
|
// Represents a single generated file.
|
||||||
|
message File {
|
||||||
|
// The file name, relative to the output directory. The name must not
|
||||||
|
// contain "." or ".." components and must be relative, not absolute (so,
|
||||||
|
// the file cannot lie outside the output directory). "/" must be used as
|
||||||
|
// the path separator, not "\".
|
||||||
|
//
|
||||||
|
// If the name is omitted, the content will be appended to the previous
|
||||||
|
// file. This allows the generator to break large files into small chunks,
|
||||||
|
// and allows the generated text to be streamed back to protoc so that large
|
||||||
|
// files need not reside completely in memory at one time. Note that as of
|
||||||
|
// this writing protoc does not optimize for this -- it will read the entire
|
||||||
|
// CodeGeneratorResponse before writing files to disk.
|
||||||
|
optional string name = 1;
|
||||||
|
|
||||||
|
// If non-empty, indicates that the named file should already exist, and the
|
||||||
|
// content here is to be inserted into that file at a defined insertion
|
||||||
|
// point. This feature allows a code generator to extend the output
|
||||||
|
// produced by another code generator. The original generator may provide
|
||||||
|
// insertion points by placing special annotations in the file that look
|
||||||
|
// like:
|
||||||
|
// @@protoc_insertion_point(NAME)
|
||||||
|
// The annotation can have arbitrary text before and after it on the line,
|
||||||
|
// which allows it to be placed in a comment. NAME should be replaced with
|
||||||
|
// an identifier naming the point -- this is what other generators will use
|
||||||
|
// as the insertion_point. Code inserted at this point will be placed
|
||||||
|
// immediately above the line containing the insertion point (thus multiple
|
||||||
|
// insertions to the same point will come out in the order they were added).
|
||||||
|
// The double-@ is intended to make it unlikely that the generated code
|
||||||
|
// could contain things that look like insertion points by accident.
|
||||||
|
//
|
||||||
|
// For example, the C++ code generator places the following line in the
|
||||||
|
// .pb.h files that it generates:
|
||||||
|
// // @@protoc_insertion_point(namespace_scope)
|
||||||
|
// This line appears within the scope of the file's package namespace, but
|
||||||
|
// outside of any particular class. Another plugin can then specify the
|
||||||
|
// insertion_point "namespace_scope" to generate additional classes or
|
||||||
|
// other declarations that should be placed in this scope.
|
||||||
|
//
|
||||||
|
// Note that if the line containing the insertion point begins with
|
||||||
|
// whitespace, the same whitespace will be added to every line of the
|
||||||
|
// inserted text. This is useful for languages like Python, where
|
||||||
|
// indentation matters. In these languages, the insertion point comment
|
||||||
|
// should be indented the same amount as any inserted code will need to be
|
||||||
|
// in order to work correctly in that context.
|
||||||
|
//
|
||||||
|
// The code generator that generates the initial file and the one which
|
||||||
|
// inserts into it must both run as part of a single invocation of protoc.
|
||||||
|
// Code generators are executed in the order in which they appear on the
|
||||||
|
// command line.
|
||||||
|
//
|
||||||
|
// If |insertion_point| is present, |name| must also be present.
|
||||||
|
optional string insertion_point = 2;
|
||||||
|
|
||||||
|
// The file contents.
|
||||||
|
optional string content = 15;
|
||||||
|
}
|
||||||
|
repeated File file = 15;
|
||||||
|
}
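To make the request/response flow concrete, here is a hedged sketch of a minimal protoc plugin written against the vendored plugin package; the plugin name and the file it emits are invented for illustration. Installed on PATH as protoc-gen-hello, it would run when protoc is invoked with --hello_out.

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc writes a serialized CodeGeneratorRequest to the plugin's stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatal(err)
	}

	// Emit one illustrative file per .proto listed on the command line.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".hello.txt"),
			Content: proto.String("generated from " + name + "\n"),
		})
	}

	// The serialized CodeGeneratorResponse goes back on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatal(err)
	}
}
```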
|
149
vendor/github.com/golang/protobuf/ptypes/any/any.proto
generated
vendored
Normal file
|
@ -0,0 +1,149 @@
|
||||||
|
// Protocol Buffers - Google's data interchange format
|
||||||
|
// Copyright 2008 Google Inc. All rights reserved.
|
||||||
|
// https://developers.google.com/protocol-buffers/
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package google.protobuf;
|
||||||
|
|
||||||
|
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||||
|
option go_package = "github.com/golang/protobuf/ptypes/any";
|
||||||
|
option java_package = "com.google.protobuf";
|
||||||
|
option java_outer_classname = "AnyProto";
|
||||||
|
option java_multiple_files = true;
|
||||||
|
option objc_class_prefix = "GPB";
|
||||||
|
|
||||||
|
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||||
|
// URL that describes the type of the serialized message.
|
||||||
|
//
|
||||||
|
// Protobuf library provides support to pack/unpack Any values in the form
|
||||||
|
// of utility functions or additional generated methods of the Any type.
|
||||||
|
//
|
||||||
|
// Example 1: Pack and unpack a message in C++.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any;
|
||||||
|
// any.PackFrom(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.UnpackTo(&foo)) {
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 2: Pack and unpack a message in Java.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any = Any.pack(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.is(Foo.class)) {
|
||||||
|
// foo = any.unpack(Foo.class);
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 3: Pack and unpack a message in Python.
|
||||||
|
//
|
||||||
|
// foo = Foo(...)
|
||||||
|
// any = Any()
|
||||||
|
// any.Pack(foo)
|
||||||
|
// ...
|
||||||
|
// if any.Is(Foo.DESCRIPTOR):
|
||||||
|
// any.Unpack(foo)
|
||||||
|
// ...
|
||||||
|
//
|
||||||
|
// Example 4: Pack and unpack a message in Go
|
||||||
|
//
|
||||||
|
// foo := &pb.Foo{...}
|
||||||
|
// any, err := ptypes.MarshalAny(foo)
|
||||||
|
// ...
|
||||||
|
// foo := &pb.Foo{}
|
||||||
|
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The pack methods provided by protobuf library will by default use
|
||||||
|
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||||
|
// methods only use the fully qualified type name after the last '/'
|
||||||
|
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||||
|
// name "y.z".
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// JSON
|
||||||
|
// ====
|
||||||
|
// The JSON representation of an `Any` value uses the regular
|
||||||
|
// representation of the deserialized, embedded message, with an
|
||||||
|
// additional field `@type` which contains the type URL. Example:
|
||||||
|
//
|
||||||
|
// package google.profile;
|
||||||
|
// message Person {
|
||||||
|
// string first_name = 1;
|
||||||
|
// string last_name = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.profile.Person",
|
||||||
|
// "firstName": <string>,
|
||||||
|
// "lastName": <string>
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// If the embedded message type is well-known and has a custom JSON
|
||||||
|
// representation, that representation will be embedded adding a field
|
||||||
|
// `value` which holds the custom JSON in addition to the `@type`
|
||||||
|
// field. Example (for message [google.protobuf.Duration][]):
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||||
|
// "value": "1.212s"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
message Any {
|
||||||
|
// A URL/resource name whose content describes the type of the
|
||||||
|
// serialized protocol buffer message.
|
||||||
|
//
|
||||||
|
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||||
|
// following restrictions and interpretations apply:
|
||||||
|
//
|
||||||
|
// * If no scheme is provided, `https` is assumed.
|
||||||
|
// * The last segment of the URL's path must represent the fully
|
||||||
|
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||||
|
// The name should be in a canonical form (e.g., leading "." is
|
||||||
|
// not accepted).
|
||||||
|
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||||
|
// value in binary format, or produce an error.
|
||||||
|
// * Applications are allowed to cache lookup results based on the
|
||||||
|
// URL, or have them precompiled into a binary to avoid any
|
||||||
|
// lookup. Therefore, binary compatibility needs to be preserved
|
||||||
|
// on changes to types. (Use versioned type names to manage
|
||||||
|
// breaking changes.)
|
||||||
|
//
|
||||||
|
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||||
|
// used with implementation specific semantics.
|
||||||
|
//
|
||||||
|
string type_url = 1;
|
||||||
|
|
||||||
|
// Must be a valid serialized protocol buffer of the above specified type.
|
||||||
|
bytes value = 2;
|
||||||
|
}
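The Go snippet in the comment above uses a placeholder pb.Foo; a self-contained variant using the vendored ptypes helpers and a well-known message type looks like this:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack: any concrete proto.Message works; a Duration keeps this runnable.
	a, err := ptypes.MarshalAny(ptypes.DurationProto(90 * time.Second))
	if err != nil {
		log.Fatal(err)
	}
	// The type URL is derived from the fully qualified message name.
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack back into a typed message.
	var d durpb.Duration
	if err := ptypes.UnmarshalAny(a, &d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Seconds) // 90
}
```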
|
18
vendor/github.com/gorilla/context/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
language: go
|
||||||
|
sudo: false
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- go: 1.3
|
||||||
|
- go: 1.4
|
||||||
|
- go: 1.5
|
||||||
|
- go: 1.6
|
||||||
|
- go: tip
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go get -t -v ./...
|
||||||
|
- diff -u <(echo -n) <(gofmt -d .)
|
||||||
|
- go vet $(go list ./... | grep -v /vendor/)
|
||||||
|
- go test -v -race ./...
|
7
vendor/github.com/gorilla/context/README.md
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
context
|
||||||
|
=======
|
||||||
|
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
|
||||||
|
|
||||||
|
gorilla/context is a general purpose registry for global request variables.
|
||||||
|
|
||||||
|
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
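A minimal usage sketch (the handler and key are illustrative, not taken from this README):

```go
package main

import (
	"net/http"

	"github.com/gorilla/context"
)

type ctxKey int

const userKey ctxKey = 0

func handler(w http.ResponseWriter, r *http.Request) {
	// Stash a request-scoped value...
	context.Set(r, userKey, "alice")

	// ...and read it back later during the same request.
	if user, ok := context.Get(r, userKey).(string); ok {
		w.Write([]byte("hello " + user))
	}

	// Drop everything stored for this request (context.ClearHandler can do
	// this automatically for a whole handler chain).
	context.Clear(r)
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil)
}
```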
|
18
vendor/github.com/gorilla/handlers/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
language: go
|
||||||
|
sudo: false
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- go: 1.4
|
||||||
|
- go: 1.5
|
||||||
|
- go: 1.6
|
||||||
|
- go: 1.7
|
||||||
|
- go: tip
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go get -t -v ./...
|
||||||
|
- diff -u <(echo -n) <(gofmt -d .)
|
||||||
|
- go vet $(go list ./... | grep -v /vendor/)
|
||||||
|
- go test -v -race ./...
|
53
vendor/github.com/gorilla/handlers/README.md
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
gorilla/handlers
|
||||||
|
================
|
||||||
|
[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers)
|
||||||
|
|
||||||
|
Package handlers is a collection of handlers (aka "HTTP middleware") for use
|
||||||
|
with Go's `net/http` package (or any framework supporting `http.Handler`), including:
|
||||||
|
|
||||||
|
* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log
|
||||||
|
Format](http://httpd.apache.org/docs/2.2/logs.html#common).
|
||||||
|
* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log
|
||||||
|
Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by
|
||||||
|
both Apache and nginx.
|
||||||
|
* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses.
|
||||||
|
* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted
|
||||||
|
content types.
|
||||||
|
* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a
|
||||||
|
`map[string]http.Handler`
|
||||||
|
* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the
|
||||||
|
`X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded`
|
||||||
|
headers when running a Go server behind an HTTP reverse proxy.
|
||||||
|
* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for redirecting to the preferred host when handling multiple
|
||||||
|
domains (i.e. multiple CNAME aliases).
|
||||||
|
* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics.
|
||||||
|
|
||||||
|
Other handlers are documented [on the Gorilla
|
||||||
|
website](http://www.gorillatoolkit.org/pkg/handlers).
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"github.com/gorilla/handlers"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := http.NewServeMux()
|
||||||
|
|
||||||
|
// Only log requests to our admin dashboard to stdout
|
||||||
|
r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard)))
|
||||||
|
r.HandleFunc("/", ShowIndex)
|
||||||
|
|
||||||
|
// Wrap our server with our gzip handler to gzip compress all responses.
|
||||||
|
http.ListenAndServe(":8000", handlers.CompressHandler(r))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
BSD licensed. See the included LICENSE file for details.
|
||||||
|
|
20
vendor/github.com/gorilla/mux/.travis.yml
generated
vendored
Normal file
20
vendor/github.com/gorilla/mux/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
language: go
|
||||||
|
sudo: false
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- go: 1.2
|
||||||
|
- go: 1.3
|
||||||
|
- go: 1.4
|
||||||
|
- go: 1.5
|
||||||
|
- go: 1.6
|
||||||
|
- go: tip
|
||||||
|
|
||||||
|
install:
|
||||||
|
- # Skip
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go get -t -v ./...
|
||||||
|
- diff -u <(echo -n) <(gofmt -d .)
|
||||||
|
- go tool vet .
|
||||||
|
- go test -v -race ./...
|
242
vendor/github.com/gorilla/mux/README.md
generated
vendored
Normal file
242
vendor/github.com/gorilla/mux/README.md
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
||||||
|
mux
|
||||||
|
===
|
||||||
|
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
|
||||||
|
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
|
||||||
|
|
||||||
|
http://www.gorillatoolkit.org/pkg/mux
|
||||||
|
|
||||||
|
Package `gorilla/mux` implements a request router and dispatcher.
|
||||||
|
|
||||||
|
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
|
||||||
|
|
||||||
|
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
|
||||||
|
* URL hosts and paths can have variables with an optional regular expression.
|
||||||
|
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
|
||||||
|
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
|
||||||
|
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
|
||||||
|
|
||||||
|
Let's start registering a couple of URL paths and handlers:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func main() {
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/", HomeHandler)
|
||||||
|
r.HandleFunc("/products", ProductsHandler)
|
||||||
|
r.HandleFunc("/articles", ArticlesHandler)
|
||||||
|
http.Handle("/", r)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
|
||||||
|
|
||||||
|
Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/products/{key}", ProductHandler)
|
||||||
|
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
|
||||||
|
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
|
||||||
|
```
|
||||||
|
|
||||||
|
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
vars := mux.Vars(request)
|
||||||
|
category := vars["category"]
|
||||||
|
```
|
||||||
|
|
||||||
|
And this is all you need to know about the basic usage. More advanced options are explained below.
|
||||||
|
|
||||||
|
Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
// Only matches if domain is "www.example.com".
|
||||||
|
r.Host("www.example.com")
|
||||||
|
// Matches a dynamic subdomain.
|
||||||
|
r.Host("{subdomain:[a-z]+}.domain.com")
|
||||||
|
```
|
||||||
|
|
||||||
|
There are several other matchers that can be added. To match path prefixes:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.PathPrefix("/products/")
|
||||||
|
```
|
||||||
|
|
||||||
|
...or HTTP methods:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.Methods("GET", "POST")
|
||||||
|
```
|
||||||
|
|
||||||
|
...or URL schemes:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.Schemes("https")
|
||||||
|
```
|
||||||
|
|
||||||
|
...or header values:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.Headers("X-Requested-With", "XMLHttpRequest")
|
||||||
|
```
|
||||||
|
|
||||||
|
...or query values:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.Queries("key", "value")
|
||||||
|
```
|
||||||
|
|
||||||
|
...or to use a custom matcher function:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
|
||||||
|
return r.ProtoMajor == 0
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
...and finally, it is possible to combine several matchers in a single route:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.HandleFunc("/products", ProductsHandler).
|
||||||
|
Host("www.example.com").
|
||||||
|
Methods("GET").
|
||||||
|
Schemes("http")
|
||||||
|
```
|
||||||
|
|
||||||
|
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
|
||||||
|
|
||||||
|
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
s := r.Host("www.example.com").Subrouter()
|
||||||
|
```
|
||||||
|
|
||||||
|
Then register routes in the subrouter:
|
||||||
|
|
||||||
|
```go
|
||||||
|
s.HandleFunc("/products/", ProductsHandler)
|
||||||
|
s.HandleFunc("/products/{key}", ProductHandler)
|
||||||
|
s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
|
||||||
|
```
|
||||||
|
|
||||||
|
The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
|
||||||
|
|
||||||
|
Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.
|
||||||
|
|
||||||
|
There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as a base for their paths:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
s := r.PathPrefix("/products").Subrouter()
|
||||||
|
// "/products/"
|
||||||
|
s.HandleFunc("/", ProductsHandler)
|
||||||
|
// "/products/{key}/"
|
||||||
|
s.HandleFunc("/{key}/", ProductHandler)
|
||||||
|
// "/products/{key}/details"
|
||||||
|
s.HandleFunc("/{key}/details", ProductDetailsHandler)
|
||||||
|
```
|
||||||
|
|
||||||
|
Now let's see how to build registered URLs.
|
||||||
|
|
||||||
|
Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name by calling `Name()` on a route. For example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||||
|
Name("article")
|
||||||
|
```
|
||||||
|
|
||||||
|
To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
|
||||||
|
|
||||||
|
```go
|
||||||
|
url, err := r.Get("article").URL("category", "technology", "id", "42")
|
||||||
|
```
|
||||||
|
|
||||||
|
...and the result will be a `url.URL` with the following path:
|
||||||
|
|
||||||
|
```
|
||||||
|
"/articles/technology/42"
|
||||||
|
```
|
||||||
|
|
||||||
|
This also works for host variables:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.Host("{subdomain}.domain.com").
|
||||||
|
Path("/articles/{category}/{id:[0-9]+}").
|
||||||
|
HandlerFunc(ArticleHandler).
|
||||||
|
Name("article")
|
||||||
|
|
||||||
|
// url.String() will be "http://news.domain.com/articles/technology/42"
|
||||||
|
url, err := r.Get("article").URL("subdomain", "news",
|
||||||
|
"category", "technology",
|
||||||
|
"id", "42")
|
||||||
|
```
|
||||||
|
|
||||||
|
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
|
||||||
|
|
||||||
|
Regex support also exists for matching Headers within a route. For example, we could do:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r.HeadersRegexp("Content-Type", "application/(text|json)")
|
||||||
|
```
|
||||||
|
|
||||||
|
...and the route will match requests with a Content-Type of either `application/json` or `application/text`.
|
||||||
|
|
||||||
|
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// "http://news.domain.com/"
|
||||||
|
host, err := r.Get("article").URLHost("subdomain", "news")
|
||||||
|
|
||||||
|
// "/articles/technology/42"
|
||||||
|
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
|
||||||
|
```
|
||||||
|
|
||||||
|
And if you use subrouters, host and path defined separately can be built as well:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
s := r.Host("{subdomain}.domain.com").Subrouter()
|
||||||
|
s.Path("/articles/{category}/{id:[0-9]+}").
|
||||||
|
HandlerFunc(ArticleHandler).
|
||||||
|
Name("article")
|
||||||
|
|
||||||
|
// "http://news.domain.com/articles/technology/42"
|
||||||
|
url, err := r.Get("article").URL("subdomain", "news",
|
||||||
|
"category", "technology",
|
||||||
|
"id", "42")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Full Example
|
||||||
|
|
||||||
|
Here's a complete, runnable example of a small `mux` based server:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"log"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
)
|
||||||
|
|
||||||
|
func YourHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Write([]byte("Gorilla!\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := mux.NewRouter()
|
||||||
|
// Routes consist of a path and a handler function.
|
||||||
|
r.HandleFunc("/", YourHandler)
|
||||||
|
|
||||||
|
// Bind to a port and pass our router in
|
||||||
|
log.Fatal(http.ListenAndServe(":8000", r))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
BSD licensed. See the LICENSE file for details.
|
198
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
generated
vendored
Normal file
198
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,198 @@
|
||||||
|
# Created by .ignore support plugin (hsz.mobi)
|
||||||
|
coverage.txt
|
||||||
|
### Go template
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
*.test
|
||||||
|
*.prof
|
||||||
|
### Windows template
|
||||||
|
# Windows image file caches
|
||||||
|
Thumbs.db
|
||||||
|
ehthumbs.db
|
||||||
|
|
||||||
|
# Folder config file
|
||||||
|
Desktop.ini
|
||||||
|
|
||||||
|
# Recycle Bin used on file shares
|
||||||
|
$RECYCLE.BIN/
|
||||||
|
|
||||||
|
# Windows Installer files
|
||||||
|
*.cab
|
||||||
|
*.msi
|
||||||
|
*.msm
|
||||||
|
*.msp
|
||||||
|
|
||||||
|
# Windows shortcuts
|
||||||
|
*.lnk
|
||||||
|
### Kate template
|
||||||
|
# Swap Files #
|
||||||
|
.*.kate-swp
|
||||||
|
.swp.*
|
||||||
|
### SublimeText template
|
||||||
|
# cache files for sublime text
|
||||||
|
*.tmlanguage.cache
|
||||||
|
*.tmPreferences.cache
|
||||||
|
*.stTheme.cache
|
||||||
|
|
||||||
|
# workspace files are user-specific
|
||||||
|
*.sublime-workspace
|
||||||
|
|
||||||
|
# project files should be checked into the repository, unless a significant
|
||||||
|
# proportion of contributors will probably not be using SublimeText
|
||||||
|
# *.sublime-project
|
||||||
|
|
||||||
|
# sftp configuration file
|
||||||
|
sftp-config.json
|
||||||
|
### Linux template
|
||||||
|
*~
|
||||||
|
|
||||||
|
# temporary files which can be created if a process still has a handle open of a deleted file
|
||||||
|
.fuse_hidden*
|
||||||
|
|
||||||
|
# KDE directory preferences
|
||||||
|
.directory
|
||||||
|
|
||||||
|
# Linux trash folder which might appear on any partition or disk
|
||||||
|
.Trash-*
|
||||||
|
### JetBrains template
|
||||||
|
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||||
|
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||||
|
|
||||||
|
# User-specific stuff:
|
||||||
|
.idea
|
||||||
|
.idea/tasks.xml
|
||||||
|
.idea/dictionaries
|
||||||
|
.idea/vcs.xml
|
||||||
|
.idea/jsLibraryMappings.xml
|
||||||
|
|
||||||
|
# Sensitive or high-churn files:
|
||||||
|
.idea/dataSources.ids
|
||||||
|
.idea/dataSources.xml
|
||||||
|
.idea/dataSources.local.xml
|
||||||
|
.idea/sqlDataSources.xml
|
||||||
|
.idea/dynamic.xml
|
||||||
|
.idea/uiDesigner.xml
|
||||||
|
|
||||||
|
# Gradle:
|
||||||
|
.idea/gradle.xml
|
||||||
|
.idea/libraries
|
||||||
|
|
||||||
|
# Mongo Explorer plugin:
|
||||||
|
.idea/mongoSettings.xml
|
||||||
|
|
||||||
|
## File-based project format:
|
||||||
|
*.iws
|
||||||
|
|
||||||
|
## Plugin-specific files:
|
||||||
|
|
||||||
|
# IntelliJ
|
||||||
|
/out/
|
||||||
|
|
||||||
|
# mpeltonen/sbt-idea plugin
|
||||||
|
.idea_modules/
|
||||||
|
|
||||||
|
# JIRA plugin
|
||||||
|
atlassian-ide-plugin.xml
|
||||||
|
|
||||||
|
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||||
|
com_crashlytics_export_strings.xml
|
||||||
|
crashlytics.properties
|
||||||
|
crashlytics-build.properties
|
||||||
|
fabric.properties
|
||||||
|
### Xcode template
|
||||||
|
# Xcode
|
||||||
|
#
|
||||||
|
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
|
||||||
|
|
||||||
|
## Build generated
|
||||||
|
build/
|
||||||
|
DerivedData/
|
||||||
|
|
||||||
|
## Various settings
|
||||||
|
*.pbxuser
|
||||||
|
!default.pbxuser
|
||||||
|
*.mode1v3
|
||||||
|
!default.mode1v3
|
||||||
|
*.mode2v3
|
||||||
|
!default.mode2v3
|
||||||
|
*.perspectivev3
|
||||||
|
!default.perspectivev3
|
||||||
|
xcuserdata/
|
||||||
|
|
||||||
|
## Other
|
||||||
|
*.moved-aside
|
||||||
|
*.xccheckout
|
||||||
|
*.xcscmblueprint
|
||||||
|
### Eclipse template
|
||||||
|
|
||||||
|
.metadata
|
||||||
|
bin/
|
||||||
|
tmp/
|
||||||
|
*.tmp
|
||||||
|
*.bak
|
||||||
|
*.swp
|
||||||
|
*~.nib
|
||||||
|
local.properties
|
||||||
|
.settings/
|
||||||
|
.loadpath
|
||||||
|
.recommenders
|
||||||
|
|
||||||
|
# Eclipse Core
|
||||||
|
.project
|
||||||
|
|
||||||
|
# External tool builders
|
||||||
|
.externalToolBuilders/
|
||||||
|
|
||||||
|
# Locally stored "Eclipse launch configurations"
|
||||||
|
*.launch
|
||||||
|
|
||||||
|
# PyDev specific (Python IDE for Eclipse)
|
||||||
|
*.pydevproject
|
||||||
|
|
||||||
|
# CDT-specific (C/C++ Development Tooling)
|
||||||
|
.cproject
|
||||||
|
|
||||||
|
# JDT-specific (Eclipse Java Development Tools)
|
||||||
|
.classpath
|
||||||
|
|
||||||
|
# Java annotation processor (APT)
|
||||||
|
.factorypath
|
||||||
|
|
||||||
|
# PDT-specific (PHP Development Tools)
|
||||||
|
.buildpath
|
||||||
|
|
||||||
|
# sbteclipse plugin
|
||||||
|
.target
|
||||||
|
|
||||||
|
# Tern plugin
|
||||||
|
.tern-project
|
||||||
|
|
||||||
|
# TeXlipse plugin
|
||||||
|
.texlipse
|
||||||
|
|
||||||
|
# STS (Spring Tool Suite)
|
||||||
|
.springBeans
|
||||||
|
|
||||||
|
# Code Recommenders
|
||||||
|
.recommenders/
|
||||||
|
|
19
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
generated
vendored
Normal file
19
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
sudo: false
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.6.x
|
||||||
|
- 1.7.x
|
||||||
|
- 1.8.x
|
||||||
|
- master
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get github.com/prometheus/client_golang/prometheus
|
||||||
|
- go get google.golang.org/grpc
|
||||||
|
- go get golang.org/x/net/context
|
||||||
|
- go get github.com/stretchr/testify
|
||||||
|
|
||||||
|
script:
|
||||||
|
- ./test_all.sh
|
||||||
|
|
||||||
|
after_success:
|
||||||
|
- bash <(curl -s https://codecov.io/bash)
|
247
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
generated
vendored
Normal file
247
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
generated
vendored
Normal file
|
@ -0,0 +1,247 @@
|
||||||
|
# Go gRPC Interceptors for Prometheus monitoring
|
||||||
|
|
||||||
|
[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
|
||||||
|
[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
|
||||||
|
[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
|
||||||
|
[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
|
||||||
|
[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
|
||||||
|
|
||||||
|
[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
|
||||||
|
|
||||||
|
A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
|
||||||
|
|
||||||
|
## Interceptors
|
||||||
|
|
||||||
|
[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
|
||||||
|
by a gRPC Server before the request is passed on to the user's application logic. It is a perfect way to implement
|
||||||
|
common patterns: auth, logging and... monitoring.
|
||||||
|
|
||||||
|
To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
|
||||||
|
|
||||||
|
### Server-side
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||||
|
...
|
||||||
|
// Initialize your gRPC server's interceptor.
|
||||||
|
myServer := grpc.NewServer(
|
||||||
|
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
|
||||||
|
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
|
||||||
|
)
|
||||||
|
// Register your gRPC service implementations.
|
||||||
|
myservice.RegisterMyServiceServer(s.server, &myServiceImpl{})
|
||||||
|
// After all your registrations, make sure all of the Prometheus metrics are initialized.
|
||||||
|
grpc_prometheus.Register(myServer)
|
||||||
|
// Register Prometheus metrics handler.
|
||||||
|
http.Handle("/metrics", promhttp.Handler())
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
### Client-side
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||||
|
...
|
||||||
|
clientConn, err = grpc.Dial(
|
||||||
|
address,
|
||||||
|
grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
|
||||||
|
grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
|
||||||
|
)
|
||||||
|
client = pb_testproto.NewTestServiceClient(clientConn)
|
||||||
|
resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
# Metrics
|
||||||
|
|
||||||
|
## Labels
|
||||||
|
|
||||||
|
All server-side metrics start with `grpc_server` as the Prometheus subsystem name. All client-side metrics start with `grpc_client`; the two mirror each other. Similarly, all methods
|
||||||
|
contain the same rich labels:
|
||||||
|
|
||||||
|
* `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and
|
||||||
|
the `service` section name. E.g. for `package = mwitkow.testproto` and
|
||||||
|
`service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
|
||||||
|
* `grpc_method` - the name of the method called on the gRPC service. E.g.
|
||||||
|
`grpc_method="Ping"`
|
||||||
|
* `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
|
||||||
|
Differentiating between them is especially important for latency measurements.
|
||||||
|
|
||||||
|
- `unary` is single request, single response RPC
|
||||||
|
- `client_stream` is a multi-request, single response RPC
|
||||||
|
- `server_stream` is a single request, multi-response RPC
|
||||||
|
- `bidi_stream` is a multi-request, multi-response RPC
|
||||||
|
|
||||||
|
|
||||||
|
Additionally for completed RPCs, the following labels are used:
|
||||||
|
|
||||||
|
* `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
|
||||||
|
The full list of statuses is too long, but here are some common ones:
|
||||||
|
|
||||||
|
- `OK` - means the RPC was successful
|
||||||
|
- `InvalidArgument` - RPC contained bad values
|
||||||
|
- `Internal` - server-side error not disclosed to the clients
|
||||||
|
|
||||||
|
## Counters
|
||||||
|
|
||||||
|
The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go); they are exposed via
|
||||||
|
the respective Prometheus handler (usually `/metrics`).
|
||||||
|
|
||||||
|
For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones contain mirror concepts.
|
||||||
|
|
||||||
|
For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
|
||||||
|
calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
|
||||||
|
|
||||||
|
First, immediately after the server receives the call it will increment the
|
||||||
|
`grpc_server_started_total` and start the handling time clock (if histograms are enabled).
|
||||||
|
|
||||||
|
```jsoniq
|
||||||
|
grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||||
|
```
|
||||||
|
|
||||||
|
Then the user logic gets invoked. It receives one message from the client containing the request
|
||||||
|
(it's a `server_stream`):
|
||||||
|
|
||||||
|
```jsoniq
|
||||||
|
grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||||
|
```
|
||||||
|
|
||||||
|
The user logic may return an error, or send multiple messages back to the client. In this case, on
|
||||||
|
each of the 20 messages sent back, a counter will be incremented:
|
||||||
|
|
||||||
|
```jsoniq
|
||||||
|
grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
|
||||||
|
```
|
||||||
|
|
||||||
|
After the call completes, its status (`OK` or another [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
|
||||||
|
and the relevant call labels increment the `grpc_server_handled_total` counter.
|
||||||
|
|
||||||
|
```jsoniq
|
||||||
|
grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Histograms
|
||||||
|
|
||||||
|
[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
|
||||||
|
to measure latency distributions of your RPCs. However since it is bad practice to have metrics
|
||||||
|
of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
|
||||||
|
the latency monitoring metrics are disabled by default. To enable them please call the following
|
||||||
|
in your server initialization code:
|
||||||
|
|
||||||
|
```jsoniq
|
||||||
|
grpc_prometheus.EnableHandlingTimeHistogram()
|
||||||
|
```
|
||||||
|
|
||||||
|
After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
|
||||||
|
variable `grpc_server_handling_seconds`. It contains three sub-metrics:
|
||||||
|
|
||||||
|
* `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
|
||||||
|
* `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
|
||||||
|
calculating average handling times
|
||||||
|
* `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
|
||||||
|
handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
|
||||||
|
|
||||||
|
The counter values will look as follows:
|
||||||
|
|
||||||
|
```jsoniq
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
|
||||||
|
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
|
||||||
|
grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
|
||||||
|
grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Useful query examples
|
||||||
|
|
||||||
|
Prometheus philosophy is to provide the most detailed metrics possible to the monitoring system, and
|
||||||
|
let the aggregations be handled there. The verbosity of the above metrics makes it possible to have that
|
||||||
|
flexibility. Here are a couple of useful monitoring queries:
|
||||||
|
|
||||||
|
|
||||||
|
### request inbound rate
|
||||||
|
```jsoniq
|
||||||
|
sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
|
||||||
|
```
|
||||||
|
For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
|
||||||
|
rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
|
||||||
|
how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
|
||||||
|
|
||||||
|
### unary request error rate
|
||||||
|
```jsoniq
|
||||||
|
sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
|
||||||
|
```
|
||||||
|
For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the
|
||||||
|
ones that didn't finish with `OK` code.
|
||||||
|
|
||||||
|
### unary request error percentage
|
||||||
|
```jsoniq
|
||||||
|
sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
|
||||||
|
/
|
||||||
|
sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
|
||||||
|
* 100.0
|
||||||
|
```
|
||||||
|
For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
|
||||||
|
this is a combination of the two above examples. This is an example of a query you would like to
|
||||||
|
[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
|
||||||
|
"no more than 1% requests should fail".
|
||||||
|
|
||||||
|
### average response stream size
|
||||||
|
```jsoniq
|
||||||
|
sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
|
||||||
|
/
|
||||||
|
sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
|
||||||
|
```
|
||||||
|
For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all `
|
||||||
|
server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows
|
||||||
|
you to track when clients started to send "wide" queries that return many messages.
|
||||||
|
Note the divisor is the number of started RPCs, in order to account for in-flight requests.
|
||||||
|
|
||||||
|
### 99%-tile latency of unary requests
|
||||||
|
```jsoniq
|
||||||
|
histogram_quantile(0.99,
|
||||||
|
sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
|
||||||
|
)
|
||||||
|
```
|
||||||
|
For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
|
||||||
|
of the handling time of RPCs per service. Note the `5m` rate: it means that the quantile
|
||||||
|
estimation will take samples in a rolling `5m` window. When combined with other quantiles
|
||||||
|
(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system
|
||||||
|
(e.g. impact of caching).
|
||||||
|
|
||||||
|
### percentage of slow unary queries (>250ms)
|
||||||
|
```jsoniq
|
||||||
|
100.0 - (
|
||||||
|
sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
|
||||||
|
/
|
||||||
|
sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
|
||||||
|
) * 100.0
|
||||||
|
```
|
||||||
|
For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25`
|
||||||
|
seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
|
||||||
|
buckets, meaning that counting the fraction of "fast" requests is easier. However, simple maths helps.
|
||||||
|
This is an example of a query you would like to alert on in your system for SLA violations,
|
||||||
|
e.g. "less than 1% of requests are slower than 250ms".
|
||||||
|
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
This code has been used since August 2015 as the basis for monitoring of *production* gRPC micro services at [Improbable](https://improbable.io).
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
|
14
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/test_all.sh
generated
vendored
Normal file
14
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/test_all.sh
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
echo "" > coverage.txt
|
||||||
|
|
||||||
|
for d in $(go list ./... | grep -v vendor); do
|
||||||
|
echo -e "TESTS FOR: for \033[0;35m${d}\033[0m"
|
||||||
|
go test -race -v -coverprofile=profile.coverage.out -covermode=atomic $d
|
||||||
|
if [ -f profile.coverage.out ]; then
|
||||||
|
cat profile.coverage.out >> coverage.txt
|
||||||
|
rm profile.coverage.out
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
|
done
|
121
vendor/github.com/gtank/cryptopasta/README
generated
vendored
Normal file
121
vendor/github.com/gtank/cryptopasta/README
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
||||||
|
TL;DR- Copy & paste your crypto code from here instead of Stack Overflow.
|
||||||
|
|
||||||
|
This library demonstrates a suite of basic cryptography from the Go standard
|
||||||
|
library. To the extent possible, it tries to hide complexity and help you avoid
|
||||||
|
common mistakes. The recommendations were chosen as a compromise between
|
||||||
|
cryptographic qualities, the Go standard lib, and my existing use cases.
|
||||||
|
|
||||||
|
Some particular design choices I've made:
|
||||||
|
|
||||||
|
1. SHA-512/256 has been chosen as the default hash for the examples. It's
|
||||||
|
faster on 64-bit machines and immune to length extension. If it doesn't work
|
||||||
|
in your case, replace instances of it with ordinary SHA-256.
|
||||||
|
|
||||||
|
2. The specific ECDSA parameters were chosen to be compatible with RFC7518[1]
|
||||||
|
while using the best implementation of ECDSA available. Go's P-256 is
|
||||||
|
constant-time (which prevents certain types of attacks) while its P-384 and
|
||||||
|
P-521 are not.
|
||||||
|
|
||||||
|
3. Key parameters are arrays rather than slices so the compiler can help you
|
||||||
|
avoid mixing up the arguments. The signing and marshaling functions use the
|
||||||
|
crypto/ecdsa key types directly for the same reason.
|
||||||
|
|
||||||
|
4. Public/private keypairs for signing are marshaled into and out of PEM
|
||||||
|
format, making them relatively portable to other crypto software you're
|
||||||
|
likely to use (openssl, cfssl, etc).
|
||||||
|
|
||||||
|
5. Key generation functions will panic if they can't read enough random bytes
|
||||||
|
to generate the key. Key generation is critical, and if crypto/rand fails at
|
||||||
|
that stage then you should stop doing cryptography on that machine immediately.
|
||||||
|
|
||||||
|
6. The license is a CC0 public domain dedication, with the intent that you can
|
||||||
|
just copy bits of this directly into your code and never be required to
|
||||||
|
acknowledge my copyright, provide source code, or do anything else commonly
|
||||||
|
associated with open licenses.
|
||||||
|
|
||||||
|
|
||||||
|
The specific recommendations are:
|
||||||
|
|
||||||
|
|
||||||
|
Encryption - 256-bit AES-GCM with random 96-bit nonces
|
||||||
|
|
||||||
|
Using AES-GCM (instead of AES-CBC, AES-CFB, or AES-CTR, all of which Go also
|
||||||
|
offers) provides authentication in addition to confidentiality. This means that
|
||||||
|
the content of your data is hidden and that any modification of the encrypted
|
||||||
|
data will result in a failure to decrypt. This rules out entire classes of
|
||||||
|
possible attacks. Randomized nonces remove the choices around nonce generation
|
||||||
|
and management, which are another common source of error in crypto
|
||||||
|
implementations.
|
||||||
|
|
||||||
|
The interfaces in this library allow only the use of 256-bit keys.
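As a rough illustration of what that recommendation amounts to in plain Go
(a sketch against the standard library only, not the cryptopasta API itself;
the function name is mine):

    package example

    import (
        "crypto/aes"
        "crypto/cipher"
        "crypto/rand"
        "io"
    )

    // encrypt seals plaintext with AES-256-GCM using a random 96-bit nonce.
    // The nonce is prepended to the returned ciphertext so the receiver can
    // split it off again before decrypting.
    func encrypt(plaintext []byte, key *[32]byte) ([]byte, error) {
        block, err := aes.NewCipher(key[:])
        if err != nil {
            return nil, err
        }
        gcm, err := cipher.NewGCM(block)
        if err != nil {
            return nil, err
        }
        nonce := make([]byte, gcm.NonceSize())
        if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
            return nil, err
        }
        return gcm.Seal(nonce, nonce, plaintext, nil), nil
    }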
|
||||||
|
|
||||||
|
|
||||||
|
Hashing - HMAC-SHA512/256
|
||||||
|
|
||||||
|
Using hash functions directly is fraught with various perils – it's common for
|
||||||
|
developers to accidentally write code that is subject to easy collision or
|
||||||
|
length extension attacks. HMAC is a function built on top of hashes and it
|
||||||
|
doesn't have those problems. Using SHA-512/256 as the underlying hash function
|
||||||
|
means the process will be faster on 64-bit machines, but the output will be the
|
||||||
|
same length as the more familiar SHA-256.
|
||||||
|
|
||||||
|
This interface encourages you to scope your hashes with an English-language
|
||||||
|
string (a "tag") that describes the purpose of the hash. Tagged hashes are a
|
||||||
|
common "security hygiene" measure to ensure that hashing the same data for
|
||||||
|
different purposes will produce different outputs.
|
||||||
|
|
||||||
|
|
||||||
|
Password hashing - bcrypt with work factor 14
|
||||||
|
|
||||||
|
Use this to store users' passwords and check them for login (e.g. in a web
|
||||||
|
backend). While they both have "hashing" in the name, password hashing is an
|
||||||
|
entirely different situation from ordinary hashing and requires its own
|
||||||
|
specialized algorithm. bcrypt is a hash function designed for password storage.
|
||||||
|
It can be made selectively slower (based on a "work factor") to increase the
|
||||||
|
difficulty of brute-force password cracking attempts.
|
||||||
|
|
||||||
|
As of 2016, a work factor of 14 should be well on the side of future-proofing
|
||||||
|
over performance. If it turns out to be too slow for your needs, you can try
|
||||||
|
using 13 or even 12. You should not go below work factor 12.
|
||||||
|
|
||||||
|
|
||||||
|
Symmetric Signatures / Message Authentication - HMAC-SHA512/256
|
||||||
|
|
||||||
|
When two parties share a secret key, they can use message authentication to
|
||||||
|
make sure that a piece of data hasn't been altered. You can think of it as a
|
||||||
|
"symmetric signature" - it proves both that the data is unchanged and that
|
||||||
|
someone who knows the shared secret key generated it. Anyone who does not know
|
||||||
|
the secret key can neither validate the data nor make valid alterations.
|
||||||
|
|
||||||
|
This comes up most often in the context of web stuff, such as:
|
||||||
|
|
||||||
|
1. Authenticating requests to your API. The most widely known example is
|
||||||
|
probably the Amazon AWS API, which requires you to sign requests with
|
||||||
|
HMAC-SHA256. In this type of use, the "secret key" is a token that the API
|
||||||
|
provider issues to authorized API users.
|
||||||
|
|
||||||
|
2. Validating authenticated tokens (cookies, JWTs, etc) that are issued by a
|
||||||
|
service but are stored by a user. In this case, the service wants to ensure
|
||||||
|
that a user doesn't modify the data contained in the token.
|
||||||
|
|
||||||
|
As with encryption, you should always use a 256-bit random key to
|
||||||
|
authenticate messages.
|
||||||
|
|
||||||
|
|
||||||
|
Asymmetric Signatures - ECDSA on P-256 with SHA-256 message digests
|
||||||
|
|
||||||
|
These are the classic public/private keypair signatures that you probably think
|
||||||
|
of when you hear the word "signature". The holder of a private key can sign
|
||||||
|
data that anyone who has the corresponding public key can verify.
|
||||||
|
|
||||||
|
Go takes very good care of us here. In particular, the Go implementation of
|
||||||
|
P-256 is constant time to protect against side-channel attacks, and the Go
|
||||||
|
implementation of ECDSA generates safe nonces to protect against the type of
|
||||||
|
repeated-nonce attack that broke the PS3.
|
||||||
|
|
||||||
|
In terms of JWTs, this algorithm is called "ES256". The functions
|
||||||
|
"EncodeSignatureJWT" and "DecodeSignatureJWT" will convert the basic signature
|
||||||
|
format to and from the encoding specified by RFC7515[2].
|
||||||
|
|
||||||
|
[1] https://tools.ietf.org/html/rfc7518#section-3.1
|
||||||
|
[2] https://tools.ietf.org/html/rfc7515#appendix-A.3
|
23
vendor/github.com/inconshreveable/mousetrap/README.md
generated
vendored
Normal file
23
vendor/github.com/inconshreveable/mousetrap/README.md
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
# mousetrap
|
||||||
|
|
||||||
|
mousetrap is a tiny library that answers a single question.
|
||||||
|
|
||||||
|
On a Windows machine, was the process invoked by someone double clicking on
|
||||||
|
the executable file while browsing in explorer?
|
||||||
|
|
||||||
|
### Motivation
|
||||||
|
|
||||||
|
Windows developers unfamiliar with command line tools will often "double-click"
|
||||||
|
the executable for a tool. Because most CLI tools print the help and then exit
|
||||||
|
when invoked without arguments, this is often very frustrating for those users.
|
||||||
|
|
||||||
|
mousetrap provides a way to detect these invocations so that you can provide
|
||||||
|
more helpful behavior and instructions on how to run the CLI tool. To see what
|
||||||
|
this looks like, both from an organizational and a technical perspective, see
|
||||||
|
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
|
||||||
|
|
||||||
|
### The interface
|
||||||
|
|
||||||
|
The library exposes a single interface:
|
||||||
|
|
||||||
|
func StartedByExplorer() (bool)
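A hedged sketch of how that call is typically used (the message and the
five-second pause are illustrative, not part of the library):

    package main

    import (
        "fmt"
        "os"
        "time"

        "github.com/inconshreveable/mousetrap"
    )

    func main() {
        // If the binary was started by double-clicking it in Explorer rather
        // than from a console, explain how to run it and pause so the console
        // window does not vanish immediately.
        if mousetrap.StartedByExplorer() {
            fmt.Println("This is a command line tool; please run it from cmd.exe or PowerShell.")
            time.Sleep(5 * time.Second)
            os.Exit(1)
        }
        // ... normal CLI behaviour goes here ...
    }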
|
25
vendor/github.com/jonboulle/clockwork/.gitignore
generated
vendored
Normal file
25
vendor/github.com/jonboulle/clockwork/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
*.test
|
||||||
|
|
||||||
|
*.swp
|
5
vendor/github.com/jonboulle/clockwork/.travis.yml
generated
vendored
Normal file
5
vendor/github.com/jonboulle/clockwork/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.3
|
||||||
|
|
||||||
|
sudo: false
|
69
vendor/github.com/jonboulle/clockwork/README.md
generated
vendored
Normal file
69
vendor/github.com/jonboulle/clockwork/README.md
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
||||||
|
clockwork
|
||||||
|
=========
|
||||||
|
|
||||||
|
[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork)
|
||||||
|
[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork)
|
||||||
|
|
||||||
|
a simple fake clock for golang
|
||||||
|
|
||||||
|
# Usage
|
||||||
|
|
||||||
|
Replace uses of the `time` package with the `clockwork.Clock` interface instead.
|
||||||
|
|
||||||
|
For example, instead of using `time.Sleep` directly:
|
||||||
|
|
||||||
|
```
|
||||||
|
func my_func() {
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
do_something()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
inject a clock and use its `Sleep` method instead:
|
||||||
|
|
||||||
|
```
|
||||||
|
func my_func(clock clockwork.Clock) {
|
||||||
|
clock.Sleep(3 * time.Second)
|
||||||
|
do_something()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Now you can easily test `my_func` with a `FakeClock`:
|
||||||
|
|
||||||
|
```
|
||||||
|
func TestMyFunc(t *testing.T) {
|
||||||
|
c := clockwork.NewFakeClock()
|
||||||
|
|
||||||
|
// Start our sleepy function
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
my_func(c)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Ensure we wait until my_func is sleeping
|
||||||
|
c.BlockUntil(1)
|
||||||
|
|
||||||
|
assert_state()
|
||||||
|
|
||||||
|
// Advance the FakeClock forward in time
|
||||||
|
c.Advance(3 * time.Second)
|
||||||
|
|
||||||
|
// Wait until the function completes
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
assert_state()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
and in production builds, simply inject the real clock instead:
|
||||||
|
```
|
||||||
|
my_func(clockwork.NewRealClock())
|
||||||
|
```
|
||||||
|
|
||||||
|
See [example_test.go](example_test.go) for a full example.
|
||||||
|
|
||||||
|
# Credits
|
||||||
|
|
||||||
|
clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking%20time)
|
5
vendor/github.com/kylelemons/godebug/pretty/.gitignore
generated
vendored
Normal file
5
vendor/github.com/kylelemons/godebug/pretty/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
*.test
|
||||||
|
*.bench
|
||||||
|
*.golden
|
||||||
|
*.txt
|
||||||
|
*.prof
|
4
vendor/github.com/lib/pq/.gitignore
generated
vendored
Normal file
4
vendor/github.com/lib/pq/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
.db
|
||||||
|
*.test
|
||||||
|
*~
|
||||||
|
*.swp
|
86
vendor/github.com/lib/pq/.travis.sh
generated
vendored
Normal file
86
vendor/github.com/lib/pq/.travis.sh
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
client_configure() {
|
||||||
|
sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
|
||||||
|
}
|
||||||
|
|
||||||
|
pgdg_repository() {
|
||||||
|
local sourcelist='sources.list.d/postgresql.list'
|
||||||
|
|
||||||
|
curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
|
||||||
|
echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
|
||||||
|
sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
|
||||||
|
}
|
||||||
|
|
||||||
|
postgresql_configure() {
|
||||||
|
sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
|
||||||
|
local all all trust
|
||||||
|
hostnossl all pqgossltest 127.0.0.1/32 reject
|
||||||
|
hostnossl all pqgosslcert 127.0.0.1/32 reject
|
||||||
|
hostssl all pqgossltest 127.0.0.1/32 trust
|
||||||
|
hostssl all pqgosslcert 127.0.0.1/32 cert
|
||||||
|
host all all 127.0.0.1/32 trust
|
||||||
|
hostnossl all pqgossltest ::1/128 reject
|
||||||
|
hostnossl all pqgosslcert ::1/128 reject
|
||||||
|
hostssl all pqgossltest ::1/128 trust
|
||||||
|
hostssl all pqgosslcert ::1/128 cert
|
||||||
|
host all all ::1/128 trust
|
||||||
|
config
|
||||||
|
|
||||||
|
xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
|
||||||
|
certs/root.crt
|
||||||
|
certs/server.crt
|
||||||
|
certs/server.key
|
||||||
|
certificates
|
||||||
|
|
||||||
|
sort -VCu <<-versions ||
|
||||||
|
$PGVERSION
|
||||||
|
9.2
|
||||||
|
versions
|
||||||
|
sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
|
||||||
|
ssl_ca_file = 'root.crt'
|
||||||
|
ssl_cert_file = 'server.crt'
|
||||||
|
ssl_key_file = 'server.key'
|
||||||
|
config
|
||||||
|
|
||||||
|
echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null
|
||||||
|
|
||||||
|
sudo service postgresql restart
|
||||||
|
}
|
||||||
|
|
||||||
|
postgresql_install() {
|
||||||
|
xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
|
||||||
|
postgresql-$PGVERSION
|
||||||
|
postgresql-server-dev-$PGVERSION
|
||||||
|
postgresql-contrib-$PGVERSION
|
||||||
|
packages
|
||||||
|
}
|
||||||
|
|
||||||
|
postgresql_uninstall() {
|
||||||
|
sudo service postgresql stop
|
||||||
|
xargs sudo apt-get -y --purge remove <<-packages
|
||||||
|
libpq-dev
|
||||||
|
libpq5
|
||||||
|
postgresql
|
||||||
|
postgresql-client-common
|
||||||
|
postgresql-common
|
||||||
|
packages
|
||||||
|
sudo rm -rf /var/lib/postgresql
|
||||||
|
}
|
||||||
|
|
||||||
|
megacheck_install() {
|
||||||
|
# Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous
|
||||||
|
# new error messages in old code.
|
||||||
|
go get -d honnef.co/go/tools/...
|
||||||
|
git -C $GOPATH/src/honnef.co/go/tools/ checkout $MEGACHECK_VERSION
|
||||||
|
go install honnef.co/go/tools/cmd/megacheck
|
||||||
|
megacheck --version
|
||||||
|
}
|
||||||
|
|
||||||
|
golint_install() {
|
||||||
|
go get golang.org/x/lint/golint
|
||||||
|
}
|
||||||
|
|
||||||
|
$1
|
50
vendor/github.com/lib/pq/.travis.yml
generated
vendored
Normal file
50
vendor/github.com/lib/pq/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.9.x
|
||||||
|
- 1.10.x
|
||||||
|
- 1.11.x
|
||||||
|
- master
|
||||||
|
|
||||||
|
sudo: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
global:
|
||||||
|
- PGUSER=postgres
|
||||||
|
- PQGOSSLTESTS=1
|
||||||
|
- PQSSLCERTTEST_PATH=$PWD/certs
|
||||||
|
- PGHOST=127.0.0.1
|
||||||
|
- MEGACHECK_VERSION=2017.2.2
|
||||||
|
matrix:
|
||||||
|
- PGVERSION=10
|
||||||
|
- PGVERSION=9.6
|
||||||
|
- PGVERSION=9.5
|
||||||
|
- PGVERSION=9.4
|
||||||
|
- PGVERSION=9.3
|
||||||
|
- PGVERSION=9.2
|
||||||
|
- PGVERSION=9.1
|
||||||
|
- PGVERSION=9.0
|
||||||
|
|
||||||
|
before_install:
|
||||||
|
- ./.travis.sh postgresql_uninstall
|
||||||
|
- ./.travis.sh pgdg_repository
|
||||||
|
- ./.travis.sh postgresql_install
|
||||||
|
- ./.travis.sh postgresql_configure
|
||||||
|
- ./.travis.sh client_configure
|
||||||
|
- ./.travis.sh megacheck_install
|
||||||
|
- ./.travis.sh golint_install
|
||||||
|
- go get golang.org/x/tools/cmd/goimports
|
||||||
|
|
||||||
|
before_script:
|
||||||
|
- createdb pqgotest
|
||||||
|
- createuser -DRS pqgossltest
|
||||||
|
- createuser -DRS pqgosslcert
|
||||||
|
|
||||||
|
script:
|
||||||
|
- >
|
||||||
|
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
|
||||||
|
- go vet ./...
|
||||||
|
- megacheck -go 1.9 ./...
|
||||||
|
- golint ./...
|
||||||
|
- PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
|
||||||
|
- PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...
|
29
vendor/github.com/lib/pq/CONTRIBUTING.md
generated
vendored
Normal file
29
vendor/github.com/lib/pq/CONTRIBUTING.md
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
## Contributing to pq
|
||||||
|
|
||||||
|
`pq` has a backlog of pull requests, but contributions are still very
|
||||||
|
much welcome. You can help with patch review, submitting bug reports,
|
||||||
|
or adding new functionality. There is no formal style guide, but
|
||||||
|
please conform to the style of existing code and general Go formatting
|
||||||
|
conventions when submitting patches.
|
||||||
|
|
||||||
|
### Patch review
|
||||||
|
|
||||||
|
Help review existing open pull requests by commenting on the code or
|
||||||
|
proposed functionality.
|
||||||
|
|
||||||
|
### Bug reports
|
||||||
|
|
||||||
|
We appreciate any bug reports, but especially ones with self-contained
|
||||||
|
(doesn't depend on code outside of pq), minimal (can't be simplified
|
||||||
|
further) test cases. It's especially helpful if you can submit a pull
|
||||||
|
request with just the failing test case (you'll probably want to
|
||||||
|
pattern it after the tests in
|
||||||
|
[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
|
||||||
|
|
||||||
|
### New functionality
|
||||||
|
|
||||||
|
There are a number of pending patches for new functionality, so
|
||||||
|
additional feature patches will take a while to merge. Still, patches
|
||||||
|
are generally reviewed based on usefulness and complexity in addition
|
||||||
|
to time-in-queue, so if you have a knockout idea, take a shot. Feel
|
||||||
|
free to open an issue discussing your proposed patch beforehand.
|
95
vendor/github.com/lib/pq/README.md
generated
vendored
Normal file
95
vendor/github.com/lib/pq/README.md
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
# pq - A pure Go postgres driver for Go's database/sql package
|
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
|
||||||
|
[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
go get github.com/lib/pq
|
||||||
|
|
||||||
|
## Docs
|
||||||
|
|
||||||
|
For detailed documentation and basic usage examples, please see the package
|
||||||
|
documentation at <https://godoc.org/github.com/lib/pq>.
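A minimal, hedged sketch of basic usage (the connection string values are placeholders for your own environment):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" driver with database/sql
)

func main() {
	// Placeholder DSN; adjust host, user, dbname and sslmode as needed.
	db, err := sql.Open("postgres", "host=localhost user=pqgotest dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var version string
	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
		log.Fatal(err)
	}
	log.Println(version)
}
```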
|
||||||
|
|
||||||
|
## Tests
|
||||||
|
|
||||||
|
`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
* SSL
|
||||||
|
* Handles bad connections for `database/sql`
|
||||||
|
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
|
||||||
|
* Scan binary blobs correctly (i.e. `bytea`)
|
||||||
|
* Package for `hstore` support
|
||||||
|
* COPY FROM support
|
||||||
|
* pq.ParseURL for converting urls to connection strings for sql.Open.
|
||||||
|
* Many libpq compatible environment variables
|
||||||
|
* Unix socket support
|
||||||
|
* Notifications: `LISTEN`/`NOTIFY`
|
||||||
|
* pgpass support
|
||||||
|
|
||||||
|
## Future / Things you can help with
|
||||||
|
|
||||||
|
* Better COPY FROM / COPY TO (see discussion in #181)
|
||||||
|
|
||||||
|
## Thank you (alphabetical)
|
||||||
|
|
||||||
|
Some of these contributors are from the original library `bmizerany/pq.go` whose
|
||||||
|
code still exists in here.
|
||||||
|
|
||||||
|
* Andy Balholm (andybalholm)
|
||||||
|
* Ben Berkert (benburkert)
|
||||||
|
* Benjamin Heatwole (bheatwole)
|
||||||
|
* Bill Mill (llimllib)
|
||||||
|
* Bjørn Madsen (aeons)
|
||||||
|
* Blake Gentry (bgentry)
|
||||||
|
* Brad Fitzpatrick (bradfitz)
|
||||||
|
* Charlie Melbye (cmelbye)
|
||||||
|
* Chris Bandy (cbandy)
|
||||||
|
* Chris Gilling (cgilling)
|
||||||
|
* Chris Walsh (cwds)
|
||||||
|
* Dan Sosedoff (sosedoff)
|
||||||
|
* Daniel Farina (fdr)
|
||||||
|
* Eric Chlebek (echlebek)
|
||||||
|
* Eric Garrido (minusnine)
|
||||||
|
* Eric Urban (hydrogen18)
|
||||||
|
* Everyone at The Go Team
|
||||||
|
* Evan Shaw (edsrzf)
|
||||||
|
* Ewan Chou (coocood)
|
||||||
|
* Fazal Majid (fazalmajid)
|
||||||
|
* Federico Romero (federomero)
|
||||||
|
* Fumin (fumin)
|
||||||
|
* Gary Burd (garyburd)
|
||||||
|
* Heroku (heroku)
|
||||||
|
* James Pozdena (jpoz)
|
||||||
|
* Jason McVetta (jmcvetta)
|
||||||
|
* Jeremy Jay (pbnjay)
|
||||||
|
* Joakim Sernbrant (serbaut)
|
||||||
|
* John Gallagher (jgallagher)
|
||||||
|
* Jonathan Rudenberg (titanous)
|
||||||
|
* Joël Stemmer (jstemmer)
|
||||||
|
* Kamil Kisiel (kisielk)
|
||||||
|
* Kelly Dunn (kellydunn)
|
||||||
|
* Keith Rarick (kr)
|
||||||
|
* Kir Shatrov (kirs)
|
||||||
|
* Lann Martin (lann)
|
||||||
|
* Maciek Sakrejda (uhoh-itsmaciek)
|
||||||
|
* Marc Brinkmann (mbr)
|
||||||
|
* Marko Tiikkaja (johto)
|
||||||
|
* Matt Newberry (MattNewberry)
|
||||||
|
* Matt Robenolt (mattrobenolt)
|
||||||
|
* Martin Olsen (martinolsen)
|
||||||
|
* Mike Lewis (mikelikespie)
|
||||||
|
* Nicolas Patry (Narsil)
|
||||||
|
* Oliver Tonnhofer (olt)
|
||||||
|
* Patrick Hayes (phayes)
|
||||||
|
* Paul Hammond (paulhammond)
|
||||||
|
* Ryan Smith (ryandotsmith)
|
||||||
|
* Samuel Stauffer (samuel)
|
||||||
|
* Timothée Peignier (cyberdelia)
|
||||||
|
* Travis Cline (tmc)
|
||||||
|
* TruongSinh Tran-Nguyen (truongsinh)
|
||||||
|
* Yaismel Miranda (ympons)
|
||||||
|
* notedit (notedit)
|
33
vendor/github.com/lib/pq/TESTS.md
generated
vendored
Normal file
33
vendor/github.com/lib/pq/TESTS.md
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
# Tests
|
||||||
|
|
||||||
|
## Running Tests
|
||||||
|
|
||||||
|
`go test` is used for testing. A running PostgreSQL
|
||||||
|
server is required, with the ability to log in. The
|
||||||
|
database to connect to test with is "pqgotest," on
|
||||||
|
"localhost" but these can be overridden using [environment
|
||||||
|
variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
PGHOST=/run/postgresql go test
|
||||||
|
|
||||||
|
## Benchmarks
|
||||||
|
|
||||||
|
A benchmark suite can be run as part of the tests:
|
||||||
|
|
||||||
|
go test -bench .
|
||||||
|
|
||||||
|
## Example setup (Docker)
|
||||||
|
|
||||||
|
Run a postgres container:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker run --expose 5432:5432 postgres
|
||||||
|
```
|
||||||
|
|
||||||
|
Run tests:
|
||||||
|
|
||||||
|
```
|
||||||
|
PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
|
||||||
|
```
|
1
vendor/github.com/lib/pq/go.mod
generated
vendored
Normal file
1
vendor/github.com/lib/pq/go.mod
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
module github.com/lib/pq
|
4
vendor/github.com/mattn/go-sqlite3/.gitignore
generated
vendored
Normal file
4
vendor/github.com/mattn/go-sqlite3/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
*.db
|
||||||
|
*.exe
|
||||||
|
*.dll
|
||||||
|
*.o
|
13
vendor/github.com/mattn/go-sqlite3/.travis.yml
generated
vendored
Normal file
13
vendor/github.com/mattn/go-sqlite3/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
language: go
|
||||||
|
sudo: required
|
||||||
|
dist: trusty
|
||||||
|
go:
|
||||||
|
- 1.5
|
||||||
|
- 1.6
|
||||||
|
- tip
|
||||||
|
before_install:
|
||||||
|
- go get github.com/mattn/goveralls
|
||||||
|
- go get golang.org/x/tools/cmd/cover
|
||||||
|
script:
|
||||||
|
- $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
|
||||||
|
- go test -v . -tags "libsqlite3"
|
81
vendor/github.com/mattn/go-sqlite3/README.md
generated
vendored
Normal file
81
vendor/github.com/mattn/go-sqlite3/README.md
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
||||||
|
go-sqlite3
|
||||||
|
==========
|
||||||
|
|
||||||
|
[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
|
||||||
|
[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
|
||||||
|
[![GoDoc](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
|
||||||
|
|
||||||
|
Description
|
||||||
|
-----------
|
||||||
|
|
||||||
|
sqlite3 driver conforming to the built-in database/sql interface
|
||||||
|
|
||||||
|
Installation
|
||||||
|
------------
|
||||||
|
|
||||||
|
This package can be installed with the go get command:
|
||||||
|
|
||||||
|
go get github.com/mattn/go-sqlite3
|
||||||
|
|
||||||
|
_go-sqlite3_ is *cgo* package.
|
||||||
|
If you want to build your app using go-sqlite3, you need gcc.
|
||||||
|
However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore.
|
||||||
|
|
||||||
|
Documentation
|
||||||
|
-------------
|
||||||
|
|
||||||
|
API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3
|
||||||
|
|
||||||
|
Examples can be found under the `./_example` directory
|
||||||
|
|
||||||
|
FAQ
|
||||||
|
---
|
||||||
|
|
||||||
|
* Want to build go-sqlite3 with libsqlite3 on my linux.
|
||||||
|
|
||||||
|
Use `go build --tags "libsqlite3 linux"`
|
||||||
|
|
||||||
|
* Want to build go-sqlite3 with libsqlite3 on OS X.
|
||||||
|
|
||||||
|
Install sqlite3 from homebrew: `brew install sqlite3`
|
||||||
|
|
||||||
|
Use `go build --tags "libsqlite3 darwin"`
|
||||||
|
|
||||||
|
* Want to build go-sqlite3 with icu extension.
|
||||||
|
|
||||||
|
Use `go build --tags "icu"`
|
||||||
|
|
||||||
|
* Can't build go-sqlite3 on windows 64bit.
|
||||||
|
|
||||||
|
> Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit.
|
||||||
|
> See: https://github.com/mattn/go-sqlite3/issues/27
|
||||||
|
|
||||||
|
* Getting insert error while query is opened.
|
||||||
|
|
||||||
|
> You can pass some arguments into the connection string, for example, a URI.
|
||||||
|
> See: https://github.com/mattn/go-sqlite3/issues/39
|
||||||
|
|
||||||
|
* Do you want to cross compile? mingw on Linux or Mac?
|
||||||
|
|
||||||
|
> See: https://github.com/mattn/go-sqlite3/issues/106
|
||||||
|
> See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
|
||||||
|
|
||||||
|
* Want to get time.Time with current locale
|
||||||
|
|
||||||
|
Use `loc=auto` in SQLite3 filename schema like `file:foo.db?loc=auto`.
|
||||||
|
|
||||||
|
License
|
||||||
|
-------
|
||||||
|
|
||||||
|
MIT: http://mattn.mit-license.org/2012
|
||||||
|
|
||||||
|
sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
|
||||||
|
|
||||||
|
The -binding suffix was added to avoid build failures under gccgo.
|
||||||
|
|
||||||
|
In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
|
||||||
|
|
||||||
|
Author
|
||||||
|
------
|
||||||
|
|
||||||
|
Yasuhiro Matsumoto (a.k.a mattn)
|
10
vendor/github.com/pquerna/cachecontrol/.travis.yml
generated
vendored
Normal file
10
vendor/github.com/pquerna/cachecontrol/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
language: go
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get -d -v ./...
|
||||||
|
- go get -u github.com/stretchr/testify/require
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.5
|
||||||
|
- 1.6
|
||||||
|
- tip
|
108
vendor/github.com/pquerna/cachecontrol/README.md
generated
vendored
Normal file
108
vendor/github.com/pquerna/cachecontrol/README.md
generated
vendored
Normal file
|
@ -0,0 +1,108 @@
|
||||||
|
# cachecontrol: HTTP Caching Parser and Interpretation
|
||||||
|
|
||||||
|
[![GoDoc][1]][2][![Build Status](https://travis-ci.org/pquerna/cachecontrol.svg?branch=master)](https://travis-ci.org/pquerna/cachecontrol)
|
||||||
|
[1]: https://godoc.org/github.com/pquerna/cachecontrol?status.svg
|
||||||
|
[2]: https://godoc.org/github.com/pquerna/cachecontrol
|
||||||
|
|
||||||
|
|
||||||
|
`cachecontrol` implements [RFC 7234](http://tools.ietf.org/html/rfc7234) __Hypertext Transfer Protocol (HTTP/1.1): Caching__. It does this by parsing the `Cache-Control` and other headers, providing information about requests and responses -- but `cachecontrol` does not implement an actual cache backend, just the control plane to make decisions about if a particular response is cachable.
|
||||||
|
|
||||||
|
# Usage
|
||||||
|
|
||||||
|
`cachecontrol.CachableResponse` returns an array of [reasons](https://godoc.org/github.com/pquerna/cachecontrol/cacheobject#Reason) why a response should not be cached and when it expires. In the case that `len(reasons) == 0`, the response is cachable according to the RFC. However, some people want non-compliant caches for various business use cases, so each reason is specifically named, so if your cache wants to cache `POST` requests, it can easily do that, but still be RFC compliant in other situations.
|
||||||
|
|
||||||
|
# Examples
|
||||||
|
|
||||||
|
## Can you cache Example.com?
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pquerna/cachecontrol"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
req, _ := http.NewRequest("GET", "http://www.example.com/", nil)
|
||||||
|
|
||||||
|
res, _ := http.DefaultClient.Do(req)
|
||||||
|
_, _ = ioutil.ReadAll(res.Body)
|
||||||
|
|
||||||
|
reasons, expires, _ := cachecontrol.CachableResponse(req, res, cachecontrol.Options{})
|
||||||
|
|
||||||
|
fmt.Println("Reasons to not cache: ", reasons)
|
||||||
|
fmt.Println("Expiration: ", expires.String())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Can I use this in a high performance caching server?
|
||||||
|
|
||||||
|
`cachecontrol` is divided into two packages: `cachecontrol` with a high level API, and a lower level `cacheobject` package. Use [Object](https://godoc.org/github.com/pquerna/cachecontrol/cacheobject#Object) in a high performance use case where you have previously parsed headers containing dates or would like to avoid memory allocations.
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pquerna/cachecontrol/cacheobject"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
req, _ := http.NewRequest("GET", "http://www.example.com/", nil)
|
||||||
|
|
||||||
|
res, _ := http.DefaultClient.Do(req)
|
||||||
|
_, _ = ioutil.ReadAll(res.Body)
|
||||||
|
|
||||||
|
reqDir, _ := cacheobject.ParseRequestCacheControl(req.Header.Get("Cache-Control"))
|
||||||
|
|
||||||
|
resDir, _ := cacheobject.ParseResponseCacheControl(res.Header.Get("Cache-Control"))
|
||||||
|
expiresHeader, _ := http.ParseTime(res.Header.Get("Expires"))
|
||||||
|
dateHeader, _ := http.ParseTime(res.Header.Get("Date"))
|
||||||
|
lastModifiedHeader, _ := http.ParseTime(res.Header.Get("Last-Modified"))
|
||||||
|
|
||||||
|
obj := cacheobject.Object{
|
||||||
|
RespDirectives: resDir,
|
||||||
|
RespHeaders: res.Header,
|
||||||
|
RespStatusCode: res.StatusCode,
|
||||||
|
RespExpiresHeader: expiresHeader,
|
||||||
|
RespDateHeader: dateHeader,
|
||||||
|
RespLastModifiedHeader: lastModifiedHeader,
|
||||||
|
|
||||||
|
ReqDirectives: reqDir,
|
||||||
|
ReqHeaders: req.Header,
|
||||||
|
ReqMethod: req.Method,
|
||||||
|
|
||||||
|
NowUTC: time.Now().UTC(),
|
||||||
|
}
|
||||||
|
rv := cacheobject.ObjectResults{}
|
||||||
|
|
||||||
|
cacheobject.CachableObject(&obj, &rv)
|
||||||
|
cacheobject.ExpirationObject(&obj, &rv)
|
||||||
|
|
||||||
|
fmt.Println("Errors: ", rv.OutErr)
|
||||||
|
fmt.Println("Reasons to not cache: ", rv.OutReasons)
|
||||||
|
fmt.Println("Warning headers to add: ", rv.OutWarnings)
|
||||||
|
fmt.Println("Expiration: ", rv.OutExpirationTime.String())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Improvements, bugs, adding features, and taking cachecontrol new directions!
|
||||||
|
|
||||||
|
Please [open issues in Github](https://github.com/pquerna/cachecontrol/issues) for ideas, bugs, and general thoughts. Pull requests are of course preferred :)
|
||||||
|
|
||||||
|
# Credits
|
||||||
|
|
||||||
|
`cachecontrol` has recieved significant contributions from:
|
||||||
|
|
||||||
|
* [Paul Querna](https://github.com/pquerna)
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
`cachecontrol` is licensed under the [Apache License, Version 2.0](./LICENSE)
|
1
vendor/github.com/prometheus/client_golang/prometheus/.gitignore
generated
vendored
Normal file
1
vendor/github.com/prometheus/client_golang/prometheus/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
command-line-arguments.test
|
1
vendor/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
Normal file
1
vendor/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
|
13
vendor/github.com/prometheus/client_model/AUTHORS.md
generated
vendored
Normal file
13
vendor/github.com/prometheus/client_model/AUTHORS.md
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
The Prometheus project was started by Matt T. Proud (emeritus) and
|
||||||
|
Julius Volz in 2012.
|
||||||
|
|
||||||
|
Maintainers of this repository:
|
||||||
|
|
||||||
|
* Björn Rabenstein <beorn@soundcloud.com>
|
||||||
|
|
||||||
|
The following individuals have contributed code to this repository
|
||||||
|
(listed in alphabetical order):
|
||||||
|
|
||||||
|
* Björn Rabenstein <beorn@soundcloud.com>
|
||||||
|
* Matt T. Proud <matt.proud@gmail.com>
|
||||||
|
* Tobias Schmidt <ts@soundcloud.com>
|
67
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
generated
vendored
Normal file
67
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
generated
vendored
Normal file
|
@ -0,0 +1,67 @@
|
||||||
|
PACKAGE
|
||||||
|
|
||||||
|
package goautoneg
|
||||||
|
import "bitbucket.org/ww/goautoneg"
|
||||||
|
|
||||||
|
HTTP Content-Type Autonegotiation.
|
||||||
|
|
||||||
|
The functions in this package implement the behaviour specified in
|
||||||
|
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||||
|
|
||||||
|
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
|
||||||
|
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||||
|
names of its contributors may be used to endorse or promote
|
||||||
|
products derived from this software without specific prior written
|
||||||
|
permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
FUNCTIONS
|
||||||
|
|
||||||
|
func Negotiate(header string, alternatives []string) (content_type string)
|
||||||
|
Negotiate the most appropriate content_type given the accept header
|
||||||
|
and a list of alternatives.
|
||||||
|
|
||||||
|
func ParseAccept(header string) (accept []Accept)
|
||||||
|
Parse an Accept Header string returning a sorted list
|
||||||
|
of clauses
|
||||||
|
|
||||||
|
|
||||||
|
TYPES
|
||||||
|
|
||||||
|
type Accept struct {
|
||||||
|
Type, SubType string
|
||||||
|
Q float32
|
||||||
|
Params map[string]string
|
||||||
|
}
|
||||||
|
Structure to represent a clause in an HTTP Accept Header
|
||||||
|
|
||||||
|
|
||||||
|
SUBDIRECTORIES
|
||||||
|
|
||||||
|
.hg
|
5
vendor/github.com/prometheus/procfs/.travis.yml
generated
vendored
Normal file
5
vendor/github.com/prometheus/procfs/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
sudo: false
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.6.4
|
||||||
|
- 1.7.4
|
18
vendor/github.com/prometheus/procfs/CONTRIBUTING.md
generated
vendored
Normal file
18
vendor/github.com/prometheus/procfs/CONTRIBUTING.md
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
# Contributing
|
||||||
|
|
||||||
|
Prometheus uses GitHub to manage reviews of pull requests.
|
||||||
|
|
||||||
|
* If you have a trivial fix or improvement, go ahead and create a pull request,
|
||||||
|
addressing (with `@...`) the maintainer of this repository (see
|
||||||
|
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
|
||||||
|
|
||||||
|
* If you plan to do something more involved, first discuss your ideas
|
||||||
|
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||||
|
This will avoid unnecessary work and surely give you and us a good deal
|
||||||
|
of inspiration.
|
||||||
|
|
||||||
|
* Relevant coding style guidelines are the [Go Code Review
|
||||||
|
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
|
||||||
|
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||||
|
Practices for Production
|
||||||
|
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
|
1
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
Normal file
1
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
* Tobias Schmidt <tobidt@gmail.com>
|
6
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
Normal file
6
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
ci:
|
||||||
|
! gofmt -l *.go | read nothing
|
||||||
|
go vet
|
||||||
|
go test -v ./...
|
||||||
|
go get github.com/golang/lint/golint
|
||||||
|
golint *.go
|
11
vendor/github.com/prometheus/procfs/README.md
generated
vendored
Normal file
11
vendor/github.com/prometheus/procfs/README.md
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
# procfs
|
||||||
|
|
||||||
|
This procfs package provides functions to retrieve system, kernel and process
|
||||||
|
metrics from the pseudo-filesystem proc.
|
||||||
|
|
||||||
|
*WARNING*: This package is a work in progress. Its API may still break in
|
||||||
|
backwards-incompatible ways without warnings. Use it at your own risk.
|
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
|
||||||
|
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
|
6
vendor/github.com/russellhaering/goxmldsig/.travis.yml
generated
vendored
Normal file
6
vendor/github.com/russellhaering/goxmldsig/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.5
|
||||||
|
- 1.6
|
||||||
|
- tip
|
90
vendor/github.com/russellhaering/goxmldsig/README.md
generated
vendored
Normal file
90
vendor/github.com/russellhaering/goxmldsig/README.md
generated
vendored
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
# goxmldsig
|
||||||
|
|
||||||
|
[![Build Status](https://travis-ci.org/russellhaering/goxmldsig.svg?branch=master)](https://travis-ci.org/russellhaering/goxmldsig)
|
||||||
|
[![GoDoc](https://godoc.org/github.com/russellhaering/goxmldsig?status.svg)](https://godoc.org/github.com/russellhaering/goxmldsig)
|
||||||
|
|
||||||
|
XML Digital Signatures implemented in pure Go.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
Install `goxmldsig` into your `$GOPATH` using `go get`:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ go get github.com/russellhaering/goxmldsig
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Signing
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/beevik/etree"
|
||||||
|
"github.com/russellhaering/goxmldsig"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Generate a key and self-signed certificate for signing
|
||||||
|
randomKeyStore := dsig.RandomKeyStoreForTest()
|
||||||
|
ctx := dsig.NewDefaultSigningContext(randomKeyStore)
|
||||||
|
elementToSign := &etree.Element{
|
||||||
|
Tag: "ExampleElement",
|
||||||
|
}
|
||||||
|
elementToSign.CreateAttr("ID", "id1234")
|
||||||
|
|
||||||
|
// Sign the element
|
||||||
|
signedElement, err := ctx.SignEnveloped(elementToSign)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize the signed element. It is important not to modify the element
|
||||||
|
// after it has been signed - even pretty-printing the XML will invalidate
|
||||||
|
// the signature.
|
||||||
|
doc := etree.NewDocument()
|
||||||
|
doc.SetRoot(signedElement)
|
||||||
|
str, err := doc.WriteToString()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
println(str)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Signature Validation
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Validate an element against a root certificate
|
||||||
|
func validate(root *x509.Certificate, el *etree.Element) {
|
||||||
|
// Construct a signing context with one or more roots of trust.
|
||||||
|
ctx := dsig.NewDefaultValidationContext(&dsig.MemoryX509CertificateStore{
|
||||||
|
Roots: []*x509.Certificate{root},
|
||||||
|
})
|
||||||
|
|
||||||
|
// It is important to only use the returned validated element.
|
||||||
|
// See: https://www.w3.org/TR/xmldsig-bestpractices/#check-what-is-signed
|
||||||
|
validated, err := ctx.Validate(el)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
doc := etree.NewDocument()
|
||||||
|
doc.SetRoot(validated)
|
||||||
|
str, err := doc.WriteToString()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
println(str)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
This library was created in order to [implement SAML 2.0](https://github.com/russellhaering/gosaml2)
|
||||||
|
without needing to execute a command line tool to create and validate signatures. It currently
|
||||||
|
only implements the subset of relevant standards needed to support that implementation, but
|
||||||
|
I hope to make it more complete over time. Contributions are welcome.
|
1
vendor/github.com/sirupsen/logrus/.gitignore
generated
vendored
Normal file
1
vendor/github.com/sirupsen/logrus/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
logrus
|
12
vendor/github.com/sirupsen/logrus/.travis.yml
generated
vendored
Normal file
12
vendor/github.com/sirupsen/logrus/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.6.x
|
||||||
|
- 1.7.x
|
||||||
|
- 1.8.x
|
||||||
|
- tip
|
||||||
|
env:
|
||||||
|
- GOMAXPROCS=4 GORACE=halt_on_error=1
|
||||||
|
install:
|
||||||
|
- go get github.com/stretchr/testify/assert
|
||||||
|
script:
|
||||||
|
- go test -race -v .
|
109
vendor/github.com/sirupsen/logrus/CHANGELOG.md
generated
vendored
Normal file
109
vendor/github.com/sirupsen/logrus/CHANGELOG.md
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
||||||
|
# 1.0.2
|
||||||
|
|
||||||
|
* bug: quote non-string values in text formatter (#583)
|
||||||
|
* Make (*Logger) SetLevel a public method
|
||||||
|
|
||||||
|
# 1.0.1
|
||||||
|
|
||||||
|
* bug: fix escaping in text formatter (#575)
|
||||||
|
|
||||||
|
# 1.0.0
|
||||||
|
|
||||||
|
* Officially changed name to lower-case
|
||||||
|
* bug: colors on Windows 10 (#541)
|
||||||
|
* bug: fix race in accessing level (#512)
|
||||||
|
|
||||||
|
# 0.11.5
|
||||||
|
|
||||||
|
* feature: add writer and writerlevel to entry (#372)
|
||||||
|
|
||||||
|
# 0.11.4
|
||||||
|
|
||||||
|
* bug: fix undefined variable on solaris (#493)
|
||||||
|
|
||||||
|
# 0.11.3
|
||||||
|
|
||||||
|
* formatter: configure quoting of empty values (#484)
|
||||||
|
* formatter: configure quoting character (default is `"`) (#484)
|
||||||
|
* bug: fix not importing io correctly in non-linux environments (#481)
|
||||||
|
|
||||||
|
# 0.11.2
|
||||||
|
|
||||||
|
* bug: fix windows terminal detection (#476)
|
||||||
|
|
||||||
|
# 0.11.1
|
||||||
|
|
||||||
|
* bug: fix tty detection with custom out (#471)
|
||||||
|
|
||||||
|
# 0.11.0
|
||||||
|
|
||||||
|
* performance: Use bufferpool to allocate (#370)
|
||||||
|
* terminal: terminal detection for app-engine (#343)
|
||||||
|
* feature: exit handler (#375)
|
||||||
|
|
||||||
|
# 0.10.0
|
||||||
|
|
||||||
|
* feature: Add a test hook (#180)
|
||||||
|
* feature: `ParseLevel` is now case-insensitive (#326)
|
||||||
|
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
|
||||||
|
* performance: avoid re-allocations on `WithFields` (#335)
|
||||||
|
|
||||||
|
# 0.9.0
|
||||||
|
|
||||||
|
* logrus/text_formatter: don't emit empty msg
|
||||||
|
* logrus/hooks/airbrake: move out of main repository
|
||||||
|
* logrus/hooks/sentry: move out of main repository
|
||||||
|
* logrus/hooks/papertrail: move out of main repository
|
||||||
|
* logrus/hooks/bugsnag: move out of main repository
|
||||||
|
* logrus/core: run tests with `-race`
|
||||||
|
* logrus/core: detect TTY based on `stderr`
|
||||||
|
* logrus/core: support `WithError` on logger
|
||||||
|
* logrus/core: Solaris support
|
||||||
|
|
||||||
|
# 0.8.7
|
||||||
|
|
||||||
|
* logrus/core: fix possible race (#216)
|
||||||
|
* logrus/doc: small typo fixes and doc improvements
|
||||||
|
|
||||||
|
|
||||||
|
# 0.8.6
|
||||||
|
|
||||||
|
* hooks/raven: allow passing an initialized client
|
||||||
|
|
||||||
|
# 0.8.5
|
||||||
|
|
||||||
|
* logrus/core: revert #208
|
||||||
|
|
||||||
|
# 0.8.4
|
||||||
|
|
||||||
|
* formatter/text: fix data race (#218)
|
||||||
|
|
||||||
|
# 0.8.3
|
||||||
|
|
||||||
|
* logrus/core: fix entry log level (#208)
|
||||||
|
* logrus/core: improve performance of text formatter by 40%
|
||||||
|
* logrus/core: expose `LevelHooks` type
|
||||||
|
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||||
|
* formatter/text: print structs more verbosely
|
||||||
|
|
||||||
|
# 0.8.2
|
||||||
|
|
||||||
|
* logrus: fix more Fatal family functions
|
||||||
|
|
||||||
|
# 0.8.1
|
||||||
|
|
||||||
|
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||||
|
|
||||||
|
# 0.8.0
|
||||||
|
|
||||||
|
* logrus: defaults to stderr instead of stdout
|
||||||
|
* hooks/sentry: add special field for `*http.Request`
|
||||||
|
* formatter/text: ignore Windows for colors
|
||||||
|
|
||||||
|
# 0.7.3
|
||||||
|
|
||||||
|
* formatter/\*: allow configuration of timestamp layout
|
||||||
|
|
||||||
|
# 0.7.2
|
||||||
|
|
||||||
|
* formatter/text: Add configuration option for time format (#158)
|
504
vendor/github.com/sirupsen/logrus/README.md
generated
vendored
Normal file
504
vendor/github.com/sirupsen/logrus/README.md
generated
vendored
Normal file
|
@ -0,0 +1,504 @@
|
||||||
|
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
|
||||||
|
|
||||||
|
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||||
|
the standard library logger. [Godoc][godoc].
|
||||||
|
|
||||||
|
**Seeing weird case-sensitive problems?** It's in the past been possible to
|
||||||
|
import Logrus as both upper- and lower-case. Due to the Go package environment,
|
||||||
|
this caused issues in the community and we needed a standard. Some environments
|
||||||
|
experienced problems with the upper-case variant, so the lower-case was decided.
|
||||||
|
Everything using `logrus` will need to use the lower-case:
|
||||||
|
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||||
|
|
||||||
|
To fix Glide, see [these
|
||||||
|
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||||
|
For an in-depth explanation of the casing issue, see [this
|
||||||
|
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
|
||||||
|
|
||||||
|
**Are you interested in assisting in maintaining Logrus?** Currently I have a
|
||||||
|
lot of obligations, and I am unable to provide Logrus with the maintainership it
|
||||||
|
needs. If you'd like to help, please reach out to me at `simon at author's
|
||||||
|
username dot com`.
|
||||||
|
|
||||||
|
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||||
|
plain text):
|
||||||
|
|
||||||
|
![Colored](http://i.imgur.com/PY7qMwd.png)
|
||||||
|
|
||||||
|
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||||
|
or Splunk:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||||
|
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||||
|
|
||||||
|
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||||
|
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||||
|
|
||||||
|
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||||
|
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||||
|
|
||||||
|
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||||
|
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||||
|
|
||||||
|
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||||
|
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||||
|
```
|
||||||
|
|
||||||
|
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
||||||
|
attached, the output is compatible with the
|
||||||
|
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||||
|
|
||||||
|
```text
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||||
|
exit status 1
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Case-sensitivity
|
||||||
|
|
||||||
|
The organization's name was changed to lower-case--and this will not be changed
|
||||||
|
back. If you are getting import conflicts due to case sensitivity, please use
|
||||||
|
the lower-case import: `github.com/sirupsen/logrus`.
|
||||||
|
|
||||||
|
#### Example
|
||||||
|
|
||||||
|
The simplest way to use Logrus is simply the package-level exported logger:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
}).Info("A walrus appears")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||||
|
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||||
|
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||||
|
want:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Log as JSON instead of the default ASCII formatter.
|
||||||
|
log.SetFormatter(&log.JSONFormatter{})
|
||||||
|
|
||||||
|
// Output to stdout instead of the default stderr
|
||||||
|
// Can be any io.Writer, see below for File example
|
||||||
|
log.SetOutput(os.Stdout)
|
||||||
|
|
||||||
|
// Only log the warning severity or above.
|
||||||
|
log.SetLevel(log.WarnLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 122,
|
||||||
|
}).Warn("The group's number increased tremendously!")
|
||||||
|
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 100,
|
||||||
|
}).Fatal("The ice breaks!")
|
||||||
|
|
||||||
|
// A common pattern is to re-use fields between logging statements by re-using
|
||||||
|
// the logrus.Entry returned from WithFields()
|
||||||
|
contextLogger := log.WithFields(log.Fields{
|
||||||
|
"common": "this is a common field",
|
||||||
|
"other": "I also should be logged always",
|
||||||
|
})
|
||||||
|
|
||||||
|
contextLogger.Info("I'll be logged with common and other field")
|
||||||
|
contextLogger.Info("Me too")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For more advanced usage such as logging to multiple locations from the same
|
||||||
|
application, you can also create an instance of the `logrus` Logger:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create a new instance of the logger. You can have any number of instances.
|
||||||
|
var log = logrus.New()
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// The API for setting attributes is a little different than the package level
|
||||||
|
// exported logger. See Godoc.
|
||||||
|
log.Out = os.Stdout
|
||||||
|
|
||||||
|
// You could set this to any `io.Writer` such as a file
|
||||||
|
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||||
|
// if err == nil {
|
||||||
|
// log.Out = file
|
||||||
|
// } else {
|
||||||
|
// log.Info("Failed to log to file, using default stderr")
|
||||||
|
// }
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Fields
|
||||||
|
|
||||||
|
Logrus encourages careful, structured logging through logging fields instead of
|
||||||
|
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||||
|
to send event %s to topic %s with key %d")`, you should log the much more
|
||||||
|
discoverable:
|
||||||
|
|
||||||
|
```go
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"event": event,
|
||||||
|
"topic": topic,
|
||||||
|
"key": key,
|
||||||
|
}).Fatal("Failed to send event")
|
||||||
|
```
|
||||||
|
|
||||||
|
We've found this API forces you to think about logging in a way that produces
|
||||||
|
much more useful logging messages. We've been in countless situations where just
|
||||||
|
a single added field to a log statement that was already there would've saved us
|
||||||
|
hours. The `WithFields` call is optional.
|
||||||
|
|
||||||
|
In general, with Logrus using any of the `printf`-family functions should be
|
||||||
|
seen as a hint you should add a field, however, you can still use the
|
||||||
|
`printf`-family functions with Logrus.
|
||||||
|
|
||||||
|
#### Default Fields
|
||||||
|
|
||||||
|
Often it's helpful to have fields _always_ attached to log statements in an
|
||||||
|
application or parts of one. For example, you may want to always log the
|
||||||
|
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||||
|
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||||
|
every line, you can create a `logrus.Entry` to pass around instead:
|
||||||
|
|
||||||
|
```go
|
||||||
|
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||||
|
requestLogger.Info("something happened on that request") # will log request_id and user_ip
|
||||||
|
requestLogger.Warn("something not great happened")
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Hooks
|
||||||
|
|
||||||
|
You can add hooks for logging levels. For example to send errors to an exception
|
||||||
|
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||||
|
multiple places simultaneously, e.g. syslog.
|
||||||
|
|
||||||
|
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||||
|
`init`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
||||||
|
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||||
|
"log/syslog"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
|
||||||
|
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||||
|
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||||
|
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||||
|
|
||||||
|
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Unable to connect to local syslog daemon")
|
||||||
|
} else {
|
||||||
|
log.AddHook(hook)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
||||||
|
|
||||||
|
| Hook | Description |
|
||||||
|
| ----- | ----------- |
|
||||||
|
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||||
|
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||||
|
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
|
||||||
|
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
||||||
|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||||
|
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||||
|
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
|
||||||
|
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
||||||
|
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
|
||||||
|
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||||
|
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
|
||||||
|
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||||
|
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||||
|
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||||
|
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||||
|
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
|
||||||
|
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||||
|
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
|
||||||
|
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||||
|
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||||
|
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||||
|
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
|
||||||
|
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||||
|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
||||||
|
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||||
|
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
|
||||||
|
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||||
|
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
|
||||||
|
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||||
|
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||||
|
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
|
||||||
|
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
|
||||||
|
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||||
|
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
||||||
|
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||||
|
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
|
||||||
|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||||
|
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||||
|
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
|
||||||
|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
||||||
|
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||||
|
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
|
||||||
|
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
|
||||||
|
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
||||||
|
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
|
||||||
|
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
|
||||||
|
|
||||||
|
#### Level logging
|
||||||
|
|
||||||
|
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||||
|
|
||||||
|
```go
|
||||||
|
log.Debug("Useful debugging information.")
|
||||||
|
log.Info("Something noteworthy happened!")
|
||||||
|
log.Warn("You should probably take a look at this.")
|
||||||
|
log.Error("Something failed but I'm not quitting.")
|
||||||
|
// Calls os.Exit(1) after logging
|
||||||
|
log.Fatal("Bye.")
|
||||||
|
// Calls panic() after logging
|
||||||
|
log.Panic("I'm bailing.")
|
||||||
|
```
|
||||||
|
|
||||||
|
You can set the logging level on a `Logger`, then it will only log entries with
|
||||||
|
that severity or anything above it:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||||
|
log.SetLevel(log.InfoLevel)
|
||||||
|
```
|
||||||
|
|
||||||
|
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||||
|
environment if your application has that.
|
||||||
|
|
||||||
|
#### Entries
|
||||||
|
|
||||||
|
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||||
|
automatically added to all logging events:
|
||||||
|
|
||||||
|
1. `time`. The timestamp when the entry was created.
|
||||||
|
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||||
|
the `AddFields` call. E.g. `Failed to send event.`
|
||||||
|
3. `level`. The logging level. E.g. `info`.
|
||||||
|
|
||||||
|
#### Environments
|
||||||
|
|
||||||
|
Logrus has no notion of environment.
|
||||||
|
|
||||||
|
If you wish for hooks and formatters to only be used in specific environments,
|
||||||
|
you should handle that yourself. For example, if your application has a global
|
||||||
|
variable `Environment`, which is a string representation of the environment you
|
||||||
|
could do:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
init() {
|
||||||
|
// do something here to set environment depending on an environment variable
|
||||||
|
// or command-line flag
|
||||||
|
if Environment == "production" {
|
||||||
|
log.SetFormatter(&log.JSONFormatter{})
|
||||||
|
} else {
|
||||||
|
// The TextFormatter is default, you don't actually have to do this.
|
||||||
|
log.SetFormatter(&log.TextFormatter{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This configuration is how `logrus` was intended to be used, but JSON in
|
||||||
|
production is mostly only useful if you do log aggregation with tools like
|
||||||
|
Splunk or Logstash.
|
||||||
|
|
||||||
|
#### Formatters
|
||||||
|
|
||||||
|
The built-in logging formatters are:
|
||||||
|
|
||||||
|
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||||
|
without colors.
|
||||||
|
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||||
|
field to `true`. To force no colored output even if there is a TTY set the
|
||||||
|
`DisableColors` field to `true`. For Windows, see
|
||||||
|
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
|
||||||
|
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
|
||||||
|
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||||
|
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
|
||||||
|
|
||||||
|
Third party logging formatters:
|
||||||
|
|
||||||
|
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||||
|
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||||
|
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||||
|
|
||||||
|
You can define your formatter by implementing the `Formatter` interface,
|
||||||
|
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||||
|
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||||
|
default ones (see Entries section above):
|
||||||
|
|
||||||
|
```go
|
||||||
|
type MyJSONFormatter struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
log.SetFormatter(new(MyJSONFormatter))
|
||||||
|
|
||||||
|
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||||
|
// Note this doesn't include Time, Level and Message which are available on
|
||||||
|
// the Entry. Consult `godoc` on information about those fields or read the
|
||||||
|
// source of the official loggers.
|
||||||
|
serialized, err := json.Marshal(entry.Data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||||
|
}
|
||||||
|
return append(serialized, '\n'), nil
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Logger as an `io.Writer`
|
||||||
|
|
||||||
|
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||||
|
|
||||||
|
```go
|
||||||
|
w := logger.Writer()
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
srv := http.Server{
|
||||||
|
// create a stdlib log.Logger that writes to
|
||||||
|
// logrus.Logger.
|
||||||
|
ErrorLog: log.New(w, "", 0),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Each line written to that writer will be printed the usual way, using formatters
|
||||||
|
and hooks. The level for those entries is `info`.
|
||||||
|
|
||||||
|
This means that we can override the standard library logger easily:
|
||||||
|
|
||||||
|
```go
|
||||||
|
logger := logrus.New()
|
||||||
|
logger.Formatter = &logrus.JSONFormatter{}
|
||||||
|
|
||||||
|
// Use logrus for standard log output
|
||||||
|
// Note that `log` here references stdlib's log
|
||||||
|
// Not logrus imported under the name `log`.
|
||||||
|
log.SetOutput(logger.Writer())
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Rotation
|
||||||
|
|
||||||
|
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||||
|
external program (like `logrotate(8)`) that can compress and delete old log
|
||||||
|
entries. It should not be a feature of the application-level logger.
|
||||||
|
|
||||||
|
#### Tools
|
||||||
|
|
||||||
|
| Tool | Description |
|
||||||
|
| ---- | ----------- |
|
||||||
|
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|
||||||
|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|
||||||
|
|
||||||
|
#### Testing
|
||||||
|
|
||||||
|
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
|
||||||
|
|
||||||
|
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
|
||||||
|
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
||||||
|
|
||||||
|
```go
|
||||||
|
import(
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/sirupsen/logrus/hooks/test"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSomething(t*testing.T){
|
||||||
|
logger, hook := test.NewNullLogger()
|
||||||
|
logger.Error("Helloerror")
|
||||||
|
|
||||||
|
assert.Equal(t, 1, len(hook.Entries))
|
||||||
|
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
|
||||||
|
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
|
||||||
|
|
||||||
|
hook.Reset()
|
||||||
|
assert.Nil(t, hook.LastEntry())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Fatal handlers
|
||||||
|
|
||||||
|
Logrus can register one or more functions that will be called when any `fatal`
|
||||||
|
level message is logged. The registered handlers will be executed before
|
||||||
|
logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
|
||||||
|
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
|
||||||
|
|
||||||
|
```
|
||||||
|
...
|
||||||
|
handler := func() {
|
||||||
|
// gracefully shutdown something...
|
||||||
|
}
|
||||||
|
logrus.RegisterExitHandler(handler)
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Thread safety
|
||||||
|
|
||||||
|
By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
|
||||||
|
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
|
||||||
|
|
||||||
|
Situation when locking is not needed includes:
|
||||||
|
|
||||||
|
* You have no hooks registered, or hooks calling is already thread-safe.
|
||||||
|
|
||||||
|
* Writing to logger.Out is already thread-safe, for example:
|
||||||
|
|
||||||
|
1) logger.Out is protected by locks.
|
||||||
|
|
||||||
|
2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing)
|
||||||
|
|
||||||
|
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
|
36
vendor/github.com/spf13/cobra/.gitignore
generated
vendored
Normal file
36
vendor/github.com/spf13/cobra/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
|
||||||
|
# swap
|
||||||
|
[._]*.s[a-w][a-z]
|
||||||
|
[._]s[a-w][a-z]
|
||||||
|
# session
|
||||||
|
Session.vim
|
||||||
|
# temporary
|
||||||
|
.netrwhist
|
||||||
|
*~
|
||||||
|
# auto-generated tag files
|
||||||
|
tags
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
|
||||||
|
cobra.test
|
3
vendor/github.com/spf13/cobra/.mailmap
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
Steve Francia <steve.francia@gmail.com>
|
||||||
|
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||||
|
Fabiano Franz <ffranz@redhat.com> <contact@fabianofranz.com>
|
18
vendor/github.com/spf13/cobra/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.4.3
|
||||||
|
- 1.5.4
|
||||||
|
- 1.6.2
|
||||||
|
- tip
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
||||||
|
|
||||||
|
before_install:
|
||||||
|
- mkdir -p bin
|
||||||
|
- curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck
|
||||||
|
- chmod +x bin/shellcheck
|
||||||
|
script:
|
||||||
|
- PATH=$PATH:$PWD/bin go test -v ./...
|
||||||
|
- go build
|
892
vendor/github.com/spf13/cobra/README.md
generated
vendored
Normal file
|
@ -0,0 +1,892 @@
|
||||||
|
![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
|
||||||
|
|
||||||
|
Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
|
||||||
|
|
||||||
|
Many of the most widely used Go projects are built using Cobra, including:
|
||||||
|
|
||||||
|
* [Kubernetes](http://kubernetes.io/)
|
||||||
|
* [Hugo](http://gohugo.io)
|
||||||
|
* [rkt](https://github.com/coreos/rkt)
|
||||||
|
* [etcd](https://github.com/coreos/etcd)
|
||||||
|
* [Docker (distribution)](https://github.com/docker/distribution)
|
||||||
|
* [OpenShift](https://www.openshift.com/)
|
||||||
|
* [Delve](https://github.com/derekparker/delve)
|
||||||
|
* [GopherJS](http://www.gopherjs.org/)
|
||||||
|
* [CockroachDB](http://www.cockroachlabs.com/)
|
||||||
|
* [Bleve](http://www.blevesearch.com/)
|
||||||
|
* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
|
||||||
|
* [Parse (CLI)](https://parse.com/)
|
||||||
|
* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
|
||||||
|
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
|
||||||
|
|
||||||
|
|
||||||
|
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
|
||||||
|
[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
|
||||||
|
[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
|
||||||
|
|
||||||
|
![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif)
|
||||||
|
|
||||||
|
# Overview
|
||||||
|
|
||||||
|
Cobra is a library providing a simple interface to create powerful modern CLI
|
||||||
|
applications similar to git & go tools.
|
||||||
|
|
||||||
|
Cobra is also an application that will generate your application scaffolding to rapidly
|
||||||
|
develop a Cobra-based application.
|
||||||
|
|
||||||
|
Cobra provides:
|
||||||
|
* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
|
||||||
|
* Fully POSIX-compliant flags (including short & long versions)
|
||||||
|
* Nested subcommands
|
||||||
|
* Global, local and cascading flags
|
||||||
|
* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname`
|
||||||
|
* Intelligent suggestions (`app srver`... did you mean `app server`?)
|
||||||
|
* Automatic help generation for commands and flags
|
||||||
|
* Automatic detailed help for `app help [command]`
|
||||||
|
* Automatic help flag recognition of `-h`, `--help`, etc.
|
||||||
|
* Automatically generated bash autocomplete for your application
|
||||||
|
* Automatically generated man pages for your application
|
||||||
|
* Command aliases so you can change things without breaking them
|
||||||
|
* The flexibility to define your own help, usage, etc.
|
||||||
|
* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
|
||||||
|
|
||||||
|
Cobra has an exceptionally clean interface and simple design without needless
|
||||||
|
constructors or initialization methods.
|
||||||
|
|
||||||
|
Applications built with Cobra commands are designed to be as user-friendly as
|
||||||
|
possible. Flags can be placed before or after the command (as long as a
|
||||||
|
confusing space isn’t provided). Both short and long flags can be used. A
|
||||||
|
command need not even be fully typed. Help is automatically generated and
|
||||||
|
available for the application or for a specific command using either the help
|
||||||
|
command or the `--help` flag.
|
||||||
|
|
||||||
|
# Concepts
|
||||||
|
|
||||||
|
Cobra is built on a structure of commands, arguments & flags.
|
||||||
|
|
||||||
|
**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
|
||||||
|
|
||||||
|
The best applications will read like sentences when used. Users will know how
|
||||||
|
to use the application because they will natively understand how to use it.
|
||||||
|
|
||||||
|
The pattern to follow is
|
||||||
|
`APPNAME VERB NOUN --ADJECTIVE.`
|
||||||
|
or
|
||||||
|
`APPNAME COMMAND ARG --FLAG`
|
||||||
|
|
||||||
|
A few good real world examples may better illustrate this point.
|
||||||
|
|
||||||
|
In the following example, 'server' is a command, and 'port' is a flag:
|
||||||
|
|
||||||
|
> hugo server --port=1313
|
||||||
|
|
||||||
|
In this command we are telling Git to clone the URL bare.
|
||||||
|
|
||||||
|
> git clone URL --bare
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
Command is the central point of the application. Each interaction that
|
||||||
|
the application supports will be contained in a Command. A command can
|
||||||
|
have children commands and optionally run an action.
|
||||||
|
|
||||||
|
In the example above, 'server' is the command.
|
||||||
|
|
||||||
|
A Command has the following structure:
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Command struct {
|
||||||
|
Use string // The one-line usage message.
|
||||||
|
Short string // The short description shown in the 'help' output.
|
||||||
|
Long string // The long message shown in the 'help <this-command>' output.
|
||||||
|
Run func(cmd *Command, args []string) // Run runs the command.
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Flags
|
||||||
|
|
||||||
|
A Flag is a way to modify the behavior of a command. Cobra supports
|
||||||
|
fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
|
||||||
|
A Cobra command can define flags that persist through to children commands
|
||||||
|
and flags that are only available to that command.
|
||||||
|
|
||||||
|
In the example above, 'port' is the flag.
|
||||||
|
|
||||||
|
Flag functionality is provided by the [pflag
|
||||||
|
library](https://github.com/ogier/pflag), a fork of the flag standard library
|
||||||
|
which maintains the same interface while adding POSIX compliance.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Cobra works by creating a set of commands and then organizing them into a tree.
|
||||||
|
The tree defines the structure of the application.
|
||||||
|
|
||||||
|
Once each command is defined with its corresponding flags, then the
|
||||||
|
tree is assigned to the commander which is finally executed.
|
||||||
|
|
||||||
|
# Installing
|
||||||
|
Using Cobra is easy. First, use `go get` to install the latest version
|
||||||
|
of the library. This command will install the `cobra` generator executable
|
||||||
|
along with the library:
|
||||||
|
|
||||||
|
> go get -v github.com/spf13/cobra/cobra
|
||||||
|
|
||||||
|
Next, include Cobra in your application:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/spf13/cobra"
|
||||||
|
```
|
||||||
|
|
||||||
|
# Getting Started
|
||||||
|
|
||||||
|
While you are welcome to provide your own organization, typically a Cobra based
|
||||||
|
application will follow the organizational structure below.
|
||||||
|
|
||||||
|
```
|
||||||
|
▾ appName/
|
||||||
|
▾ cmd/
|
||||||
|
add.go
|
||||||
|
your.go
|
||||||
|
commands.go
|
||||||
|
here.go
|
||||||
|
main.go
|
||||||
|
```
|
||||||
|
|
||||||
|
In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import "{pathToYourApp}/cmd"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if err := cmd.RootCmd.Execute(); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(-1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Using the Cobra Generator
|
||||||
|
|
||||||
|
Cobra provides its own program that will create your application and add any
|
||||||
|
commands you want. It's the easiest way to incorporate Cobra into your application.
|
||||||
|
|
||||||
|
### cobra init
|
||||||
|
|
||||||
|
The `cobra init [yourApp]` command will create your initial application code
|
||||||
|
for you. It is a very powerful application that will populate your program with
|
||||||
|
the right structure so you can immediately enjoy all the benefits of Cobra. It
|
||||||
|
will also automatically apply the license you specify to your application.
|
||||||
|
|
||||||
|
Cobra init is pretty smart. You can provide it a full path, or simply a path
|
||||||
|
similar to what is expected in the import.
|
||||||
|
|
||||||
|
```
|
||||||
|
cobra init github.com/spf13/newAppName
|
||||||
|
```
|
||||||
|
|
||||||
|
### cobra add
|
||||||
|
|
||||||
|
Once an application is initialized Cobra can create additional commands for you.
|
||||||
|
Let's say you created an app and you wanted the following commands for it:
|
||||||
|
|
||||||
|
* app serve
|
||||||
|
* app config
|
||||||
|
* app config create
|
||||||
|
|
||||||
|
In your project directory (where your main.go file is) you would run the following:
|
||||||
|
|
||||||
|
```
|
||||||
|
cobra add serve
|
||||||
|
cobra add config
|
||||||
|
cobra add create -p 'configCmd'
|
||||||
|
```
|
||||||
|
|
||||||
|
Once you have run these three commands you would have an app structure that would look like:
|
||||||
|
|
||||||
|
```
|
||||||
|
▾ app/
|
||||||
|
▾ cmd/
|
||||||
|
serve.go
|
||||||
|
config.go
|
||||||
|
create.go
|
||||||
|
main.go
|
||||||
|
```
|
||||||
|
|
||||||
|
At this point you can run `go run main.go` and it would run your app. `go run
|
||||||
|
main.go serve`, `go run main.go config`, `go run main.go config create` along
|
||||||
|
with `go run main.go help serve`, etc would all work.
|
||||||
|
|
||||||
|
Obviously you haven't added your own code to these yet; the commands are ready
|
||||||
|
for you to give them their tasks. Have fun.
|
||||||
|
|
||||||
|
### Configuring the cobra generator
|
||||||
|
|
||||||
|
The cobra generator will be easier to use if you provide a simple configuration
|
||||||
|
file, which saves you from repeating the same information in
|
||||||
|
flags over and over.
|
||||||
|
|
||||||
|
An example ~/.cobra.yaml file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
author: Steve Francia <spf@spf13.com>
|
||||||
|
license: MIT
|
||||||
|
```
|
||||||
|
|
||||||
|
You can specify no license by setting `license` to `none` or you can specify
|
||||||
|
a custom license:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
license:
|
||||||
|
header: This file is part of {{ .appName }}.
|
||||||
|
text: |
|
||||||
|
{{ .copyright }}
|
||||||
|
|
||||||
|
This is my license. There are many like it, but this one is mine.
|
||||||
|
My license is my best friend. It is my life. I must master it as I must
|
||||||
|
master my life.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Manually implementing Cobra
|
||||||
|
|
||||||
|
To manually implement Cobra, you need to create a bare main.go file and a RootCmd file.
|
||||||
|
You will optionally provide additional commands as you see fit.
|
||||||
|
|
||||||
|
### Create the root command
|
||||||
|
|
||||||
|
The root command represents your binary itself.
|
||||||
|
|
||||||
|
|
||||||
|
#### Manually create rootCmd
|
||||||
|
|
||||||
|
Cobra doesn't require any special constructors. Simply create your commands.
|
||||||
|
|
||||||
|
Ideally you place this in app/cmd/root.go:
|
||||||
|
|
||||||
|
```go
|
||||||
|
var RootCmd = &cobra.Command{
|
||||||
|
Use: "hugo",
|
||||||
|
Short: "Hugo is a very fast static site generator",
|
||||||
|
Long: `A Fast and Flexible Static Site Generator built with
|
||||||
|
love by spf13 and friends in Go.
|
||||||
|
Complete documentation is available at http://hugo.spf13.com`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
// Do Stuff Here
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
You will additionally define flags and handle configuration in your init() function.
|
||||||
|
|
||||||
|
For example, in cmd/root.go:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func init() {
|
||||||
|
cobra.OnInitialize(initConfig)
|
||||||
|
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
|
||||||
|
RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
|
||||||
|
RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
|
||||||
|
RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
|
||||||
|
RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
|
||||||
|
viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
|
||||||
|
viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase"))
|
||||||
|
viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper"))
|
||||||
|
viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
|
||||||
|
viper.SetDefault("license", "apache")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create your main.go
|
||||||
|
|
||||||
|
With the root command you need to have your main function execute it.
|
||||||
|
Execute should be run on the root for clarity, though it can be called on any command.
|
||||||
|
|
||||||
|
In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import "{pathToYourApp}/cmd"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if err := cmd.RootCmd.Execute(); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(-1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Create additional commands
|
||||||
|
|
||||||
|
Additional commands can be defined and typically are each given their own file
|
||||||
|
inside of the cmd/ directory.
|
||||||
|
|
||||||
|
If you wanted to create a version command you would create cmd/version.go and
|
||||||
|
populate it with the following:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
RootCmd.AddCommand(versionCmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
var versionCmd = &cobra.Command{
|
||||||
|
Use: "version",
|
||||||
|
Short: "Print the version number of Hugo",
|
||||||
|
Long: `All software has versions. This is Hugo's`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Attach command to its parent
|
||||||
|
|
||||||
|
|
||||||
|
Notice that in the above example we attach the command to its parent. In
|
||||||
|
this case the parent is the rootCmd. Here we are attaching it to the
|
||||||
|
root, but commands can be attached at any level.
|
||||||
|
|
||||||
|
```go
|
||||||
|
RootCmd.AddCommand(versionCmd)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Remove a command from its parent
|
||||||
|
|
||||||
|
Removing a command is not a common action in simple programs, but it allows 3rd
|
||||||
|
parties to customize an existing command tree.
|
||||||
|
|
||||||
|
In this example, we remove the existing `VersionCmd` command of an existing
|
||||||
|
root command, and we replace it with our own version:
|
||||||
|
|
||||||
|
```go
|
||||||
|
mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd)
|
||||||
|
mainlib.RootCmd.AddCommand(versionCmd)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Working with Flags
|
||||||
|
|
||||||
|
Flags provide modifiers to control how the action command operates.
|
||||||
|
|
||||||
|
### Assign flags to a command
|
||||||
|
|
||||||
|
Since the flags are defined and used in different locations, we need to
|
||||||
|
define a variable outside with the correct scope to assign the flag to
|
||||||
|
work with.
|
||||||
|
|
||||||
|
```go
|
||||||
|
var Verbose bool
|
||||||
|
var Source string
|
||||||
|
```
|
||||||
|
|
||||||
|
There are two different approaches to assign a flag.
|
||||||
|
|
||||||
|
### Persistent Flags
|
||||||
|
|
||||||
|
A flag can be 'persistent' meaning that this flag will be available to the
|
||||||
|
command it's assigned to as well as every command under that command. For
|
||||||
|
global flags, assign a flag as a persistent flag on the root.
|
||||||
|
|
||||||
|
```go
|
||||||
|
RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Local Flags
|
||||||
|
|
||||||
|
A flag can also be assigned locally which will only apply to that specific command.
|
||||||
|
|
||||||
|
```go
|
||||||
|
RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
In the example below, we have defined three commands. Two are at the top level
|
||||||
|
and one (cmdTimes) is a child of one of the top commands. In this case the root
|
||||||
|
is not executable meaning that a subcommand is required. This is accomplished
|
||||||
|
by not providing a 'Run' for the 'rootCmd'.
|
||||||
|
|
||||||
|
We have only defined one flag for a single command.
|
||||||
|
|
||||||
|
More documentation about flags is available at https://github.com/spf13/pflag
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
|
||||||
|
var echoTimes int
|
||||||
|
|
||||||
|
var cmdPrint = &cobra.Command{
|
||||||
|
Use: "print [string to print]",
|
||||||
|
Short: "Print anything to the screen",
|
||||||
|
Long: `print is for printing anything back to the screen.
|
||||||
|
For many years people have printed back to the screen.
|
||||||
|
`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Println("Print: " + strings.Join(args, " "))
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var cmdEcho = &cobra.Command{
|
||||||
|
Use: "echo [string to echo]",
|
||||||
|
Short: "Echo anything to the screen",
|
||||||
|
Long: `echo is for echoing anything back.
|
||||||
|
Echo works a lot like print, except it has a child command.
|
||||||
|
`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Println("Print: " + strings.Join(args, " "))
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var cmdTimes = &cobra.Command{
|
||||||
|
Use: "times [# times] [string to echo]",
|
||||||
|
Short: "Echo anything to the screen more times",
|
||||||
|
Long: `echo things multiple times back to the user by providing
|
||||||
|
a count and a string.`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
for i := 0; i < echoTimes; i++ {
|
||||||
|
fmt.Println("Echo: " + strings.Join(args, " "))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
|
||||||
|
|
||||||
|
var rootCmd = &cobra.Command{Use: "app"}
|
||||||
|
rootCmd.AddCommand(cmdPrint, cmdEcho)
|
||||||
|
cmdEcho.AddCommand(cmdTimes)
|
||||||
|
rootCmd.Execute()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
|
||||||
|
|
||||||
|
## The Help Command
|
||||||
|
|
||||||
|
Cobra automatically adds a help command to your application when you have subcommands.
|
||||||
|
This will be called when a user runs 'app help'. Additionally, help will also
|
||||||
|
support all other commands as input. Say, for instance, you have a command called
|
||||||
|
'create' without any additional configuration; Cobra will work when 'app help
|
||||||
|
create' is called. Every command will automatically have the '--help' flag added.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
The following output is automatically generated by Cobra. Nothing beyond the
|
||||||
|
command and flag definitions is needed.
|
||||||
|
|
||||||
|
> hugo help
|
||||||
|
|
||||||
|
hugo is the main command, used to build your Hugo site.
|
||||||
|
|
||||||
|
Hugo is a Fast and Flexible Static Site Generator
|
||||||
|
built with love by spf13 and friends in Go.
|
||||||
|
|
||||||
|
Complete documentation is available at http://gohugo.io/.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
hugo [flags]
|
||||||
|
hugo [command]
|
||||||
|
|
||||||
|
Available Commands:
|
||||||
|
server Hugo runs its own webserver to render the files
|
||||||
|
version Print the version number of Hugo
|
||||||
|
config Print the site configuration
|
||||||
|
check Check content in the source directory
|
||||||
|
benchmark Benchmark hugo by building a site a number of times.
|
||||||
|
convert Convert your content to different formats
|
||||||
|
new Create new content for your site
|
||||||
|
list Listing out various types of content
|
||||||
|
undraft Undraft changes the content's draft status from 'True' to 'False'
|
||||||
|
genautocomplete Generate shell autocompletion script for Hugo
|
||||||
|
gendoc Generate Markdown documentation for the Hugo CLI.
|
||||||
|
genman Generate man page for Hugo
|
||||||
|
import Import your site from others.
|
||||||
|
|
||||||
|
Flags:
|
||||||
|
-b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/
|
||||||
|
-D, --buildDrafts[=false]: include content marked as draft
|
||||||
|
-F, --buildFuture[=false]: include content with publishdate in the future
|
||||||
|
--cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/
|
||||||
|
--canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL
|
||||||
|
--config="": config file (default is path/config.yaml|json|toml)
|
||||||
|
-d, --destination="": filesystem path to write files to
|
||||||
|
--disableRSS[=false]: Do not build RSS files
|
||||||
|
--disableSitemap[=false]: Do not build Sitemap file
|
||||||
|
--editor="": edit new content with this editor, if provided
|
||||||
|
--ignoreCache[=false]: Ignores the cache directory for reading but still writes to it
|
||||||
|
--log[=false]: Enable Logging
|
||||||
|
--logFile="": Log File path (if set, logging enabled automatically)
|
||||||
|
--noTimes[=false]: Don't sync modification time of files
|
||||||
|
--pluralizeListTitles[=true]: Pluralize titles in lists using inflect
|
||||||
|
--preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu")
|
||||||
|
-s, --source="": filesystem path to read files relative from
|
||||||
|
--stepAnalysis[=false]: display memory and timing of different steps of the program
|
||||||
|
-t, --theme="": theme to use (located in /themes/THEMENAME/)
|
||||||
|
--uglyURLs[=false]: if true, use /filename.html instead of /filename/
|
||||||
|
-v, --verbose[=false]: verbose output
|
||||||
|
--verboseLog[=false]: verbose logging
|
||||||
|
-w, --watch[=false]: watch filesystem for changes and recreate as needed
|
||||||
|
|
||||||
|
Use "hugo [command] --help" for more information about a command.
|
||||||
|
|
||||||
|
|
||||||
|
Help is just a command like any other. There is no special logic or behavior
|
||||||
|
around it. In fact, you can provide your own if you want.
|
||||||
|
|
||||||
|
### Defining your own help
|
||||||
|
|
||||||
|
You can provide your own Help command or your own template for the default command to use.
|
||||||
|
|
||||||
|
The default help command is
|
||||||
|
|
||||||
|
```go
|
||||||
|
func (c *Command) initHelp() {
|
||||||
|
if c.helpCommand == nil {
|
||||||
|
c.helpCommand = &Command{
|
||||||
|
Use: "help [command]",
|
||||||
|
Short: "Help about any command",
|
||||||
|
Long: `Help provides help for any command in the application.
|
||||||
|
Simply type ` + c.Name() + ` help [path to command] for full details.`,
|
||||||
|
Run: c.HelpFunc(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.AddCommand(c.helpCommand)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
You can provide your own command, function or template through the following methods:
|
||||||
|
|
||||||
|
```go
|
||||||
|
command.SetHelpCommand(cmd *Command)
|
||||||
|
|
||||||
|
command.SetHelpFunc(f func(*Command, []string))
|
||||||
|
|
||||||
|
command.SetHelpTemplate(s string)
|
||||||
|
```
|
||||||
|
|
||||||
|
The latter two will also apply to any children commands.
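As a minimal sketch, assuming `rootCmd` is your root command, a custom help template could be installed like this; the template text itself is purely illustrative, and any field or method of `Command` used by the default template is available here as well:

```go
rootCmd.SetHelpTemplate(`{{.Long}}

{{.UsageString}}`)
```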
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
When the user provides an invalid flag or invalid command, Cobra responds by
|
||||||
|
showing the user the 'usage'.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
You may recognize this from the help above. That's because the default help
|
||||||
|
embeds the usage as part of its output.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
hugo [flags]
|
||||||
|
hugo [command]
|
||||||
|
|
||||||
|
Available Commands:
|
||||||
|
server Hugo runs its own webserver to render the files
|
||||||
|
version Print the version number of Hugo
|
||||||
|
config Print the site configuration
|
||||||
|
check Check content in the source directory
|
||||||
|
benchmark Benchmark hugo by building a site a number of times.
|
||||||
|
convert Convert your content to different formats
|
||||||
|
new Create new content for your site
|
||||||
|
list Listing out various types of content
|
||||||
|
undraft Undraft changes the content's draft status from 'True' to 'False'
|
||||||
|
genautocomplete Generate shell autocompletion script for Hugo
|
||||||
|
gendoc Generate Markdown documentation for the Hugo CLI.
|
||||||
|
genman Generate man page for Hugo
|
||||||
|
import Import your site from others.
|
||||||
|
|
||||||
|
Flags:
|
||||||
|
-b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/
|
||||||
|
-D, --buildDrafts[=false]: include content marked as draft
|
||||||
|
-F, --buildFuture[=false]: include content with publishdate in the future
|
||||||
|
--cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/
|
||||||
|
--canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL
|
||||||
|
--config="": config file (default is path/config.yaml|json|toml)
|
||||||
|
-d, --destination="": filesystem path to write files to
|
||||||
|
--disableRSS[=false]: Do not build RSS files
|
||||||
|
--disableSitemap[=false]: Do not build Sitemap file
|
||||||
|
--editor="": edit new content with this editor, if provided
|
||||||
|
--ignoreCache[=false]: Ignores the cache directory for reading but still writes to it
|
||||||
|
--log[=false]: Enable Logging
|
||||||
|
--logFile="": Log File path (if set, logging enabled automatically)
|
||||||
|
--noTimes[=false]: Don't sync modification time of files
|
||||||
|
--pluralizeListTitles[=true]: Pluralize titles in lists using inflect
|
||||||
|
--preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu")
|
||||||
|
-s, --source="": filesystem path to read files relative from
|
||||||
|
--stepAnalysis[=false]: display memory and timing of different steps of the program
|
||||||
|
-t, --theme="": theme to use (located in /themes/THEMENAME/)
|
||||||
|
--uglyURLs[=false]: if true, use /filename.html instead of /filename/
|
||||||
|
-v, --verbose[=false]: verbose output
|
||||||
|
--verboseLog[=false]: verbose logging
|
||||||
|
-w, --watch[=false]: watch filesystem for changes and recreate as needed
|
||||||
|
|
||||||
|
### Defining your own usage
|
||||||
|
You can provide your own usage function or template for Cobra to use.
|
||||||
|
|
||||||
|
The default usage function is:
|
||||||
|
|
||||||
|
```go
|
||||||
|
return func(c *Command) error {
|
||||||
|
err := tmpl(c.Out(), c.UsageTemplate(), c)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Like help, the function and template are overridable through public methods:
|
||||||
|
|
||||||
|
```go
|
||||||
|
command.SetUsageFunc(f func(*Command) error)
|
||||||
|
|
||||||
|
command.SetUsageTemplate(s string)
|
||||||
|
```
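As a sketch, assuming the standard `fmt` and `os` packages are imported, a replacement usage function could simply print the command's use line:

```go
rootCmd.SetUsageFunc(func(c *cobra.Command) error {
	// c.UseLine() returns the full usage line for the command.
	_, err := fmt.Fprintf(os.Stderr, "Usage: %s\n", c.UseLine())
	return err
})
```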
|
||||||
|
|
||||||
|
## PreRun or PostRun Hooks
|
||||||
|
|
||||||
|
It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
|
||||||
|
|
||||||
|
- `PersistentPreRun`
|
||||||
|
- `PreRun`
|
||||||
|
- `Run`
|
||||||
|
- `PostRun`
|
||||||
|
- `PersistentPostRun`
|
||||||
|
|
||||||
|
An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
|
||||||
|
var rootCmd = &cobra.Command{
|
||||||
|
Use: "root [sub]",
|
||||||
|
Short: "My root command",
|
||||||
|
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
|
||||||
|
},
|
||||||
|
PreRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
|
||||||
|
},
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside rootCmd Run with args: %v\n", args)
|
||||||
|
},
|
||||||
|
PostRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
|
||||||
|
},
|
||||||
|
PersistentPostRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var subCmd = &cobra.Command{
|
||||||
|
Use: "sub [no options!]",
|
||||||
|
Short: "My subcommand",
|
||||||
|
PreRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
|
||||||
|
},
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside subCmd Run with args: %v\n", args)
|
||||||
|
},
|
||||||
|
PostRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
|
||||||
|
},
|
||||||
|
PersistentPostRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rootCmd.AddCommand(subCmd)
|
||||||
|
|
||||||
|
rootCmd.SetArgs([]string{""})
|
||||||
|
_ = rootCmd.Execute()
|
||||||
|
fmt.Print("\n")
|
||||||
|
rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
|
||||||
|
_ = rootCmd.Execute()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Alternative Error Handling
|
||||||
|
|
||||||
|
Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top,
|
||||||
|
providing a way to handle the errors in one location. The current list of functions that return an error is:
|
||||||
|
|
||||||
|
* PersistentPreRunE
|
||||||
|
* PreRunE
|
||||||
|
* RunE
|
||||||
|
* PostRunE
|
||||||
|
* PersistentPostRunE
|
||||||
|
|
||||||
|
If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage`
|
||||||
|
and `SilenceErrors` to `true` on the command. A child command respects these flags if they are set on the parent
|
||||||
|
command.
|
||||||
|
|
||||||
|
**Example Usage using RunE:**
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var rootCmd = &cobra.Command{
|
||||||
|
Use: "hugo",
|
||||||
|
Short: "Hugo is a very fast static site generator",
|
||||||
|
Long: `A Fast and Flexible Static Site Generator built with
|
||||||
|
love by spf13 and friends in Go.
|
||||||
|
Complete documentation is available at http://hugo.spf13.com`,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
// Do Stuff Here
|
||||||
|
return errors.New("some random error")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := rootCmd.Execute(); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Suggestions when "unknown command" happens
|
||||||
|
|
||||||
|
Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ hugo srever
|
||||||
|
Error: unknown command "srever" for "hugo"
|
||||||
|
|
||||||
|
Did you mean this?
|
||||||
|
server
|
||||||
|
|
||||||
|
Run 'hugo --help' for usage.
|
||||||
|
```
|
||||||
|
|
||||||
|
Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
|
||||||
|
|
||||||
|
If you need to disable suggestions or tweak the string distance in your command, use:
|
||||||
|
|
||||||
|
```go
|
||||||
|
command.DisableSuggestions = true
|
||||||
|
```
|
||||||
|
|
||||||
|
or
|
||||||
|
|
||||||
|
```go
|
||||||
|
command.SuggestionsMinimumDistance = 1
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but make sense in your set of commands, and for which you don't want aliases (see the sketch after the example below). Example:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ kubectl remove
|
||||||
|
Error: unknown command "remove" for "kubectl"
|
||||||
|
|
||||||
|
Did you mean this?
|
||||||
|
delete
|
||||||
|
|
||||||
|
Run 'kubectl help' for usage.
|
||||||
|
```
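As a sketch, the `delete` command in the output above could declare those extra names itself; the surrounding command definition is illustrative only:

```go
var deleteCmd = &cobra.Command{
	Use:   "delete",
	Short: "Delete resources",
	// Typing "remove" or "rm" will suggest "delete", even though
	// neither is close to it by string distance.
	SuggestFor: []string{"remove", "rm"},
	Run: func(cmd *cobra.Command, args []string) {
		// delete resources here
	},
}
```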
|
||||||
|
|
||||||
|
## Generating Markdown-formatted documentation for your command
|
||||||
|
|
||||||
|
Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md).
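As a sketch, assuming the companion `github.com/spf13/cobra/doc` package and a `rootCmd` variable, generating the tree of Markdown files can look like:

```go
// Writes one Markdown file per command in the tree under ./docs.
if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
	log.Fatal(err)
}
```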
|
||||||
|
|
||||||
|
## Generating man pages for your command
|
||||||
|
|
||||||
|
Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md).
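Again as a sketch with the `doc` package, man page generation takes a header describing the manual section (title and section values here are placeholders):

```go
header := &doc.GenManHeader{
	Title:   "APP",
	Section: "1",
}
// Writes one man page per command under ./man.
if err := doc.GenManTree(rootCmd, header, "./man"); err != nil {
	log.Fatal(err)
}
```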
|
||||||
|
|
||||||
|
## Generating bash completions for your command
|
||||||
|
|
||||||
|
Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
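For a quick start, a single call on the root command writes the script; the file name here is arbitrary:

```go
// Writes a bash completion script covering every command and flag.
if err := rootCmd.GenBashCompletionFile("app_completions.sh"); err != nil {
	log.Fatal(err)
}
```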
|
||||||
|
|
||||||
|
## Debugging
|
||||||
|
|
||||||
|
Cobra provides a `DebugFlags` method on a command which, when called, will print
|
||||||
|
out everything Cobra knows about the flags for each command.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
command.DebugFlags()
|
||||||
|
```
|
||||||
|
|
||||||
|
## Release Notes
|
||||||
|
* **0.9.0** June 17, 2014
|
||||||
|
* flags can appear anywhere in the args (provided they are unambiguous)
|
||||||
|
* --help prints usage screen for app or command
|
||||||
|
* Prefix matching for commands
|
||||||
|
* Cleaner looking help and usage output
|
||||||
|
* Extensive test suite
|
||||||
|
* **0.8.0** Nov 5, 2013
|
||||||
|
* Reworked interface to remove commander completely
|
||||||
|
* Command now primary structure
|
||||||
|
* No initialization needed
|
||||||
|
* Usage & Help templates & functions definable at any level
|
||||||
|
* Updated Readme
|
||||||
|
* **0.7.0** Sept 24, 2013
|
||||||
|
* Needs more eyes
|
||||||
|
* Test suite
|
||||||
|
* Support for automatic error messages
|
||||||
|
* Support for help command
|
||||||
|
* Support for printing to any io.Writer instead of os.Stderr
|
||||||
|
* Support for persistent flags which cascade down tree
|
||||||
|
* Ready for integration into Hugo
|
||||||
|
* **0.1.0** Sept 3, 2013
|
||||||
|
* Implement first draft
|
||||||
|
|
||||||
|
## Extensions
|
||||||
|
|
||||||
|
Libraries for extending Cobra:
|
||||||
|
|
||||||
|
* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`.
|
||||||
|
|
||||||
|
## ToDo
|
||||||
|
* Launch proper documentation site
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
1. Fork it
|
||||||
|
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||||
|
3. Commit your changes (`git commit -am 'Add some feature'`)
|
||||||
|
4. Push to the branch (`git push origin my-new-feature`)
|
||||||
|
5. Create new Pull Request
|
||||||
|
|
||||||
|
## Contributors
|
||||||
|
|
||||||
|
Names in no particular order:
|
||||||
|
|
||||||
|
* [spf13](https://github.com/spf13),
|
||||||
|
[eparis](https://github.com/eparis),
|
||||||
|
[bep](https://github.com/bep), and many more!
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
|
||||||
|
|
||||||
|
|
||||||
|
[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
|
206
vendor/github.com/spf13/cobra/bash_completions.md
generated
vendored
Normal file
|
@ -0,0 +1,206 @@
|
||||||
|
# Generating Bash Completions For Your Own cobra.Command
|
||||||
|
|
||||||
|
Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard)
|
||||||
|
kubectl.GenBashCompletionFile("out.sh")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
|
||||||
|
|
||||||
|
## Creating your own custom functions
|
||||||
|
|
||||||
|
Some more actual code that works in kubernetes:
|
||||||
|
|
||||||
|
```go
|
||||||
|
const (
|
||||||
|
bash_completion_func = `__kubectl_parse_get()
|
||||||
|
{
|
||||||
|
local kubectl_output out
|
||||||
|
if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
|
||||||
|
out=($(echo "${kubectl_output}" | awk '{print $1}'))
|
||||||
|
COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
__kubectl_get_resource()
|
||||||
|
{
|
||||||
|
if [[ ${#nouns[@]} -eq 0 ]]; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
__kubectl_parse_get ${nouns[${#nouns[@]} -1]}
|
||||||
|
if [[ $? -eq 0 ]]; then
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
__custom_func() {
|
||||||
|
case ${last_command} in
|
||||||
|
kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
|
||||||
|
__kubectl_get_resource
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
```
|
||||||
|
|
||||||
|
And then I set that in my command definition:
|
||||||
|
|
||||||
|
```go
|
||||||
|
cmds := &cobra.Command{
|
||||||
|
Use: "kubectl",
|
||||||
|
Short: "kubectl controls the Kubernetes cluster manager",
|
||||||
|
Long: `kubectl controls the Kubernetes cluster manager.
|
||||||
|
|
||||||
|
Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
|
||||||
|
Run: runHelp,
|
||||||
|
BashCompletionFunction: bash_completion_func,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built-in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
|
||||||
|
|
||||||
|
## Have the completions code complete your 'nouns'
|
||||||
|
|
||||||
|
In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
|
||||||
|
|
||||||
|
```go
|
||||||
|
validArgs := []string{"pod", "node", "service", "replicationcontroller"}
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
|
||||||
|
Short: "Display one or many resources",
|
||||||
|
Long: get_long,
|
||||||
|
Example: get_example,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
err := RunGet(f, out, cmd, args)
|
||||||
|
util.CheckErr(err)
|
||||||
|
},
|
||||||
|
ValidArgs: validArgs,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# kubectl get [tab][tab]
|
||||||
|
node pod replicationcontroller service
|
||||||
|
```
|
||||||
|
|
||||||
|
## Plural form and shortcuts for nouns
|
||||||
|
|
||||||
|
If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
...
|
||||||
|
ValidArgs: validArgs,
|
||||||
|
	ArgAliases: argAliases,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
|
||||||
|
the completion algorithm if entered manually, e.g. in:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# kubectl get rc [tab][tab]
|
||||||
|
backend frontend database
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
|
||||||
|
in this example again instead of the replication controllers.
|
||||||
|
|
||||||
|
## Mark flags as required
|
||||||
|
|
||||||
|
Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
|
||||||
|
|
||||||
|
```go
|
||||||
|
cmd.MarkFlagRequired("pod")
|
||||||
|
cmd.MarkFlagRequired("container")
|
||||||
|
```
|
||||||
|
|
||||||
|
and you'll get something like
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# kubectl exec [tab][tab][tab]
|
||||||
|
-c --container= -p --pod=
|
||||||
|
```
|
||||||
|
|
||||||
|
# Specify valid filename extensions for flags that take a filename
|
||||||
|
|
||||||
|
In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions.
|
||||||
|
|
||||||
|
```go
|
||||||
|
annotations := []string{"json", "yaml", "yml"}
|
||||||
|
annotation := make(map[string][]string)
|
||||||
|
annotation[cobra.BashCompFilenameExt] = annotations
|
||||||
|
|
||||||
|
flag := &pflag.Flag{
|
||||||
|
Name: "filename",
|
||||||
|
Shorthand: "f",
|
||||||
|
Usage: usage,
|
||||||
|
Value: value,
|
||||||
|
DefValue: value.String(),
|
||||||
|
Annotations: annotation,
|
||||||
|
}
|
||||||
|
cmd.Flags().AddFlag(flag)
|
||||||
|
```
|
||||||
|
|
||||||
|
Now when you run a command with this filename flag you'll get something like
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# kubectl create -f
|
||||||
|
test/ example/ rpmbuild/
|
||||||
|
hello.yml test.json
|
||||||
|
```
|
||||||
|
|
||||||
|
So while there are many other files in the CWD it only shows me subdirs and those with valid extensions.
|
||||||
|
|
||||||
|
# Specify custom flag completion
|
||||||
|
|
||||||
|
Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
|
||||||
|
a custom flag completion function with cobra.BashCompCustom:
|
||||||
|
|
||||||
|
```go
|
||||||
|
annotation := make(map[string][]string)
|
||||||
|
annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
|
||||||
|
|
||||||
|
flag := &pflag.Flag{
|
||||||
|
Name: "namespace",
|
||||||
|
Usage: usage,
|
||||||
|
Annotations: annotation,
|
||||||
|
}
|
||||||
|
cmd.Flags().AddFlag(flag)
|
||||||
|
```
|
||||||
|
|
||||||
|
In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
|
||||||
|
value, e.g.:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
__kubectl_get_namespaces()
|
||||||
|
{
|
||||||
|
local template
|
||||||
|
template="{{ range .items }}{{ .metadata.name }} {{ end }}"
|
||||||
|
local kubectl_out
|
||||||
|
if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
|
||||||
|
    COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
```
|
17
vendor/github.com/spf13/pflag/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
sudo: false
|
||||||
|
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.5
|
||||||
|
- 1.6
|
||||||
|
- tip
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get github.com/golang/lint/golint
|
||||||
|
- export PATH=$GOPATH/bin:$PATH
|
||||||
|
- go install ./...
|
||||||
|
|
||||||
|
script:
|
||||||
|
- verify/all.sh -v
|
||||||
|
- go test ./...
|
275
vendor/github.com/spf13/pflag/README.md
generated
vendored
Normal file
|
@ -0,0 +1,275 @@
|
||||||
|
[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
pflag is a drop-in replacement for Go's flag package, implementing
|
||||||
|
POSIX/GNU-style --flags.
|
||||||
|
|
||||||
|
pflag is compatible with the [GNU extensions to the POSIX recommendations
|
||||||
|
for command-line options][1]. For a more precise description, see the
|
||||||
|
"Command-line flag syntax" section below.
|
||||||
|
|
||||||
|
[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
|
||||||
|
|
||||||
|
pflag is available under the same style of BSD license as the Go language,
|
||||||
|
which can be found in the LICENSE file.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
pflag is available using the standard `go get` command.
|
||||||
|
|
||||||
|
Install by running:
|
||||||
|
|
||||||
|
go get github.com/spf13/pflag
|
||||||
|
|
||||||
|
Run tests by running:
|
||||||
|
|
||||||
|
go test github.com/spf13/pflag
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
pflag is a drop-in replacement of Go's native flag package. If you import
|
||||||
|
pflag under the name "flag" then all code should continue to function
|
||||||
|
with no changes.
|
||||||
|
|
||||||
|
``` go
|
||||||
|
import flag "github.com/spf13/pflag"
|
||||||
|
```
|
||||||
|
|
||||||
|
There is one exception to this: if you directly instantiate the Flag struct
|
||||||
|
there is one more field "Shorthand" that you will need to set.
|
||||||
|
Most code never instantiates this struct directly, and instead uses
|
||||||
|
functions such as String(), BoolVar(), and Var(), and is therefore
|
||||||
|
unaffected.
|
||||||
|
|
||||||
|
Define flags using flag.String(), Bool(), Int(), etc.
|
||||||
|
|
||||||
|
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
|
||||||
|
|
||||||
|
``` go
|
||||||
|
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
|
||||||
|
```
|
||||||
|
|
||||||
|
If you like, you can bind the flag to a variable using the Var() functions.
|
||||||
|
|
||||||
|
``` go
|
||||||
|
var flagvar int
|
||||||
|
func init() {
|
||||||
|
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Or you can create custom flags that satisfy the Value interface (with
|
||||||
|
pointer receivers) and couple them to flag parsing by
|
||||||
|
|
||||||
|
``` go
|
||||||
|
flag.Var(&flagVal, "name", "help message for flagname")
|
||||||
|
```
|
||||||
|
|
||||||
|
For such flags, the default value is just the initial value of the variable.
|
||||||
|
|
||||||
|
After all flags are defined, call
|
||||||
|
|
||||||
|
``` go
|
||||||
|
flag.Parse()
|
||||||
|
```
|
||||||
|
|
||||||
|
to parse the command line into the defined flags.
|
||||||
|
|
||||||
|
Flags may then be used directly. If you're using the flags themselves,
|
||||||
|
they are all pointers; if you bind to variables, they're values.
|
||||||
|
|
||||||
|
``` go
|
||||||
|
fmt.Println("ip has value ", *ip)
|
||||||
|
fmt.Println("flagvar has value ", flagvar)
|
||||||
|
```
|
||||||
|
|
||||||
|
There are helper functions to get values later if you have the FlagSet but
|
||||||
|
it was difficult to keep up with all of the flag pointers in your code.
|
||||||
|
If you have a pflag.FlagSet with a flag called 'flagname' of type int you
|
||||||
|
can use GetInt() to get the int value. But notice that 'flagname' must exist
|
||||||
|
and it must be an int. GetString("flagname") will fail.
|
||||||
|
|
||||||
|
``` go
|
||||||
|
i, err := flagset.GetInt("flagname")
|
||||||
|
```
|
||||||
|
|
||||||
|
After parsing, the arguments after the flag are available as the
|
||||||
|
slice flag.Args() or individually as flag.Arg(i).
|
||||||
|
The arguments are indexed from 0 through flag.NArg()-1.
|
||||||
|
|
||||||
|
The pflag package also defines some new functions that are not in flag,
|
||||||
|
that give one-letter shorthands for flags. You can use these by appending
|
||||||
|
'P' to the name of any function that defines a flag.
|
||||||
|
|
||||||
|
``` go
|
||||||
|
var ip = flag.IntP("flagname", "f", 1234, "help message")
|
||||||
|
var flagvar bool
|
||||||
|
func init() {
|
||||||
|
flag.BoolVarP("boolname", "b", true, "help message")
|
||||||
|
}
|
||||||
|
flag.VarP(&flagVar, "varname", "v", 1234, "help message")
|
||||||
|
```
|
||||||
|
|
||||||
|
Shorthand letters can be used with single dashes on the command line.
|
||||||
|
Boolean shorthand flags can be combined with other shorthand flags.
|
||||||
|
|
||||||
|
The default set of command-line flags is controlled by
|
||||||
|
top-level functions. The FlagSet type allows one to define
|
||||||
|
independent sets of flags, such as to implement subcommands
|
||||||
|
in a command-line interface. The methods of FlagSet are
|
||||||
|
analogous to the top-level functions for the command-line
|
||||||
|
flag set.
|
||||||
|
|
||||||
|
## Setting no option default values for flags
|
||||||
|
|
||||||
|
After you create a flag it is possible to set the pflag.NoOptDefVal for
|
||||||
|
the given flag. Doing this changes the meaning of the flag slightly. If
|
||||||
|
a flag has a NoOptDefVal and the flag is set on the command line without
|
||||||
|
an option the flag will be set to the NoOptDefVal. For example given:
|
||||||
|
|
||||||
|
``` go
|
||||||
|
var ip = flag.IntP("flagname", "f", 1234, "help message")
|
||||||
|
flag.Lookup("flagname").NoOptDefVal = "4321"
|
||||||
|
```
|
||||||
|
|
||||||
|
Would result in something like
|
||||||
|
|
||||||
|
| Parsed Arguments | Resulting Value |
| ------------- | ------------- |
| --flagname=1357 | ip=1357 |
| --flagname | ip=4321 |
| [nothing] | ip=1234 |
|
||||||
|
|
||||||
|
## Command line flag syntax
|
||||||
|
|
||||||
|
```
|
||||||
|
--flag // boolean flags, or flags with no option default values
|
||||||
|
--flag x // only on flags without a default value
|
||||||
|
--flag=x
|
||||||
|
```
|
||||||
|
|
||||||
|
Unlike the flag package, a single dash before an option means something
|
||||||
|
different than a double dash. Single dashes signify a series of shorthand
|
||||||
|
letters for flags. All but the last shorthand letter must be boolean flags
|
||||||
|
or a flag with a default value.
|
||||||
|
|
||||||
|
```
|
||||||
|
// boolean or flags where the 'no option default value' is set
|
||||||
|
-f
|
||||||
|
-f=true
|
||||||
|
-abc
|
||||||
|
but
|
||||||
|
-b true is INVALID
|
||||||
|
|
||||||
|
// non-boolean and flags without a 'no option default value'
|
||||||
|
-n 1234
|
||||||
|
-n=1234
|
||||||
|
-n1234
|
||||||
|
|
||||||
|
// mixed
|
||||||
|
-abcs "hello"
|
||||||
|
-absd="hello"
|
||||||
|
-abcs1234
|
||||||
|
```

Flag parsing stops after the terminator "--". Unlike the flag package,
flags can be interspersed with arguments anywhere on the command line
before this terminator.

Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags (in their long form) accept 1, 0, t, f, true, false,
TRUE, FALSE, True, False.
Duration flags accept any input valid for time.ParseDuration.

## Mutating or "Normalizing" Flag names

It is possible to set a custom flag name 'normalization function'. It allows flag names to be mutated both when created in the code and when used on the command line, to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.

**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag

``` go
func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
	from := []string{"-", "_"}
	to := "."
	for _, sep := range from {
		name = strings.Replace(name, sep, to, -1)
	}
	return pflag.NormalizedName(name)
}

myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
```

**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name

``` go
func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
	switch name {
	case "old-flag-name":
		name = "new-flag-name"
	}
	return pflag.NormalizedName(name)
}

myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
```
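
To illustrate that comparison happens on the normalized form, here is a small self-contained sketch using the `wordSepNormalizeFunc` from Example #1 (the flag set name "example" and the flag name "my-flag" are only illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
	for _, sep := range []string{"-", "_"} {
		name = strings.Replace(name, sep, ".", -1)
	}
	return pflag.NormalizedName(name)
}

func main() {
	myFlagSet := pflag.NewFlagSet("example", pflag.ExitOnError)
	myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
	myFlagSet.String("my-flag", "default", "example flag")

	// Every spelling normalizes to "my.flag", so all lookups return the same *pflag.Flag.
	fmt.Println(myFlagSet.Lookup("my-flag") == myFlagSet.Lookup("my_flag")) // true
	fmt.Println(myFlagSet.Lookup("my_flag") == myFlagSet.Lookup("my.flag")) // true
}
```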

## Deprecating a flag or its shorthand

It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.

**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.

```go
// deprecate a flag by specifying its name and a usage message
flags.MarkDeprecated("badflag", "please use --good-flag instead")
```

This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.

**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".

```go
// deprecate a flag shorthand by specifying its flag name and a usage message
flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
```

This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.

Note that the usage message is essential here; it must not be empty.
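
For context, a minimal sketch that wires both calls into a program of their own; the flag set name "example" and the flag definitions are only illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	flags.Bool("good-flag", false, "the replacement flag")
	flags.Bool("badflag", false, "the old flag")
	flags.BoolP("noshorthandflag", "n", false, "flag whose shorthand is deprecated")

	// Both calls return an error if the named flag does not exist
	// or if the usage message is empty.
	if err := flags.MarkDeprecated("badflag", "please use --good-flag instead"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	if err := flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}

	// Using --badflag or -n now prints the corresponding deprecation message.
	_ = flags.Parse(os.Args[1:])
}
```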

## Hidden flags

It is possible to mark a flag as hidden, meaning it will still function as normal but will not show up in usage/help text.

**Example**: You have a flag named "secretFlag" that you need for internal use only, and you don't want it (or its usage text) showing up in help output.

```go
// hide a flag by specifying its name
flags.MarkHidden("secretFlag")
```

## Supporting Go flags when using pflag

In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
to support flags defined by third-party dependencies (e.g. `golang/glog`).

**Example**: You want to add the Go flags to the `CommandLine` flagset

```go
import (
	goflag "flag"
	flag "github.com/spf13/pflag"
)

var ip *int = flag.Int("flagname", 1234, "help message for flagname")

func main() {
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	flag.Parse()
}
```

## More info

You can see the full reference documentation of the pflag package
[at godoc.org][3], or through go's standard documentation system by
running `godoc -http=:6060` and browsing to
[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
installation.

[2]: http://localhost:6060/pkg/github.com/ogier/pflag
[3]: http://godoc.org/github.com/ogier/pflag

3 vendor/golang.org/x/crypto/AUTHORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3 vendor/golang.org/x/crypto/CONTRIBUTORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

19 vendor/golang.org/x/lint/.travis.yml generated vendored Normal file

@@ -0,0 +1,19 @@
sudo: false
language: go
go:
  - 1.10.x
  - 1.11.x
  - master

go_import_path: golang.org/x/lint

install:
  - go get -t -v ./...

script:
  - go test -v -race ./...

matrix:
  allow_failures:
    - go: master
  fast_finish: true

15 vendor/golang.org/x/lint/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,15 @@
# Contributing to Golint

## Before filing an issue:

### Are you having trouble building golint?

Check you have the latest version of its dependencies. Run
```
go get -u golang.org/x/lint/golint
```
If you still have problems, consider searching for existing issues before filing a new issue.

## Before sending a pull request:

Have you understood the purpose of golint? Make sure to carefully read `README`.

86 vendor/golang.org/x/lint/README.md generated vendored Normal file

@@ -0,0 +1,86 @@
Golint is a linter for Go source code.

[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint)

## Installation

Golint requires a
[supported release of Go](https://golang.org/doc/devel/release.html#policy).

    go get -u golang.org/x/lint/golint

## Usage

Invoke `golint` with one or more filenames, directories, or packages named
by their import paths. Golint uses the same
[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
the `go` command and therefore
also supports relative import paths like `./...`. Additionally, the `...`
wildcard can be used as a suffix on relative and absolute file paths to recurse
into them.

The output of this tool is a list of suggestions in Vim quickfix format,
which is accepted by lots of different editors.

## Purpose

Golint differs from gofmt. Gofmt reformats Go source code, whereas
golint prints out style mistakes.

Golint differs from govet. Govet is concerned with correctness, whereas
golint is concerned with coding style. Golint is in use at Google, and it
seeks to match the accepted style of the open source Go project.

The suggestions made by golint are exactly that: suggestions.
Golint is not perfect, and has both false positives and false negatives.
Do not treat its output as a gold standard. We will not be adding pragmas
or other knobs to suppress specific warnings, so do not expect or require
code to be completely "lint-free".
In short, this tool is not, and will never be, trustworthy enough for its
suggestions to be enforced automatically, for example as part of a build process.
Golint makes suggestions for many of the mechanically checkable items listed in
[Effective Go](https://golang.org/doc/effective_go.html) and the
[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).

## Scope

Golint is meant to carry out the stylistic conventions put forth in
[Effective Go](https://golang.org/doc/effective_go.html) and
[CodeReviewComments](https://golang.org/wiki/CodeReviewComments).
Changes that are not aligned with those documents will not be considered.

## Contributions

Contributions to this project are welcome provided they are [in scope](#scope),
though please send mail before starting work on anything major.
Contributors retain their copyright, so we need you to fill out
[a short form](https://developers.google.com/open-source/cla/individual)
before we can accept your contribution.

## Vim

Add this to your ~/.vimrc:

    set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim

If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.

Running `:Lint` will run golint on the current file and populate the quickfix list.

Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`

    autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow

## Emacs

Add this to your `.emacs` file:

    (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs"))
    (require 'golint)

If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.

Running M-x golint will run golint on the current file.

For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).

2 vendor/golang.org/x/lint/golint/golint.go generated vendored

@@ -5,7 +5,7 @@
 // https://developers.google.com/open-source/licenses/bsd.

 // golint lints the Go source files named on its command line.
-package main // import "golang.org/x/lint/golint"
+package main

 import (
 	"flag"

13 vendor/golang.org/x/lint/golint/importcomment.go generated vendored Normal file

@@ -0,0 +1,13 @@
// Copyright (c) 2018 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.

// +build go1.12

// Require use of the correct import path only for Go 1.12+ users, so
// any breakages coincide with people updating their CI configs or
// whatnot.

package main // import "golang.org/x/lint/golint"

3 vendor/golang.org/x/net/AUTHORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3 vendor/golang.org/x/net/CONTRIBUTORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

2 vendor/golang.org/x/net/http2/.gitignore generated vendored Normal file

@@ -0,0 +1,2 @@
*~
h2i/h2i

51 vendor/golang.org/x/net/http2/Dockerfile generated vendored Normal file

@@ -0,0 +1,51 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#

FROM ubuntu:trusty

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y git-core build-essential wget

RUN apt-get install -y --no-install-recommends \
    autotools-dev libtool pkg-config zlib1g-dev \
    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
    automake autoconf

# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
    autoconf automake autotools-dev \
    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
    cython python3.4-dev python-setuptools

# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git

WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install

WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig

CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]

3 vendor/golang.org/x/net/http2/Makefile generated vendored Normal file

@@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .

Some files were not shown because too many files have changed in this diff.