Merge pull request #32 from coreos/grpc-api

Add gRPC service for interacting with storages
Eric Chiang authored on 2016-07-31 23:27:53 -07:00, committed by GitHub
commit 45dafb035d
177 changed files with 39648 additions and 9230 deletions


@@ -1,6 +1,7 @@
 PROJ="poke"
 ORG_PATH="github.com/coreos"
 REPO_PATH="$(ORG_PATH)/$(PROJ)"
+export PATH := $(PWD)/bin:$(PATH)
 export GOBIN=$(PWD)/bin
 export GO15VENDOREXPERIMENT=1

@@ -41,15 +42,22 @@ fmt:
 	@go fmt $(shell go list ./... | grep -v '/vendor/')

 lint:
-	@for package in $(shell go list ./... | grep -v '/vendor/'); do \
+	@for package in $(shell go list ./... | grep -v '/vendor/' | grep -v 'api/apipb'); do \
 		golint $$package; \
 	done

+# TODO(ericchiang): Grab protoc as well.
+grpc: bin/protoc-gen-go
+	@protoc --go_out=plugins=grpc:. ./api/apipb/*.proto
+
+bin/protoc-gen-go:
+	@go install ${REPO_PATH}/vendor/github.com/golang/protobuf/protoc-gen-go
+
 clean:
-	rm bin/poke bin/pokectl
+	@rm bin/*

 testall: testrace vet fmt lint

 FORCE:

-.PHONY: test testrace vet fmt lint testall
+.PHONY: test testrace vet fmt lint testall grpc

136
api/api.go Normal file

@@ -0,0 +1,136 @@
package api
import (
"errors"
"golang.org/x/net/context"
"github.com/coreos/poke/api/apipb"
"github.com/coreos/poke/storage"
)
// NewServer returns a gRPC server for talking to a storage.
func NewServer(s storage.Storage) apipb.StorageServer {
return &server{s}
}
type server struct {
storage storage.Storage
}
func fromPBClient(client *apipb.Client) storage.Client {
return storage.Client{
ID: client.Id,
Secret: client.Secret,
RedirectURIs: client.RedirectUris,
TrustedPeers: client.TrustedPeers,
Public: client.Public,
Name: client.Name,
LogoURL: client.LogoUrl,
}
}
func toPBClient(client storage.Client) *apipb.Client {
return &apipb.Client{
Id: client.ID,
Secret: client.Secret,
RedirectUris: client.RedirectURIs,
TrustedPeers: client.TrustedPeers,
Public: client.Public,
Name: client.Name,
LogoUrl: client.LogoURL,
}
}
func (s *server) CreateClient(ctx context.Context, req *apipb.CreateClientReq) (*apipb.CreateClientResp, error) {
// TODO(ericchiang): Create a more centralized strategy for creating client IDs
// and secrets which are restricted based on the storage.
client := fromPBClient(req.Client)
if client.ID == "" {
client.ID = storage.NewNonce()
}
if client.Secret == "" {
client.Secret = storage.NewNonce() + storage.NewNonce()
}
if err := s.storage.CreateClient(client); err != nil {
return nil, err
}
return &apipb.CreateClientResp{Client: toPBClient(client)}, nil
}
func (s *server) UpdateClient(ctx context.Context, req *apipb.UpdateClientReq) (*apipb.UpdateClientResp, error) {
switch {
case req.Id == "":
return nil, errors.New("no ID supplied")
case req.MakePublic && req.MakePrivate:
return nil, errors.New("cannot both make public and private")
case req.MakePublic && len(req.RedirectUris) != 0:
return nil, errors.New("redirect uris supplied for a public client")
}
var client *storage.Client
updater := func(old storage.Client) (storage.Client, error) {
if req.MakePublic {
old.Public = true
}
if req.MakePrivate {
old.Public = false
}
if req.Secret != "" {
old.Secret = req.Secret
}
if req.Name != "" {
old.Name = req.Name
}
if req.LogoUrl != "" {
old.LogoURL = req.LogoUrl
}
if len(req.RedirectUris) != 0 {
if old.Public {
return old, errors.New("public clients cannot have redirect URIs")
}
old.RedirectURIs = req.RedirectUris
}
client = &old
return old, nil
}
if err := s.storage.UpdateClient(req.Id, updater); err != nil {
return nil, err
}
return &apipb.UpdateClientResp{Client: toPBClient(*client)}, nil
}
func (s *server) DeleteClient(ctx context.Context, req *apipb.DeleteClientReq) (*apipb.DeleteClientResp, error) {
if req.Id == "" {
return nil, errors.New("no client ID supplied")
}
if err := s.storage.DeleteClient(req.Id); err != nil {
return nil, err
}
return &apipb.DeleteClientResp{}, nil
}
func (s *server) ListClients(ctx context.Context, req *apipb.ListClientsReq) (*apipb.ListClientsResp, error) {
clients, err := s.storage.ListClients()
if err != nil {
return nil, err
}
resp := make([]*apipb.Client, len(clients))
for i, client := range clients {
resp[i] = toPBClient(client)
}
return &apipb.ListClientsResp{Clients: resp}, nil
}
func (s *server) GetClient(ctx context.Context, req *apipb.GetClientReq) (*apipb.GetClientResp, error) {
if req.Id == "" {
return nil, errors.New("no client ID supplied")
}
client, err := s.storage.GetClient(req.Id)
if err != nil {
return nil, err
}
return &apipb.GetClientResp{Client: toPBClient(client)}, nil
}
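
The gRPC server returned by NewServer still has to be registered on a listener. A minimal wiring sketch, not part of this commit (the package name, the listen address, and the already-opened storage value are assumptions for illustration):

package server

import (
	"net"

	"google.golang.org/grpc"

	"github.com/coreos/poke/api"
	"github.com/coreos/poke/api/apipb"
	"github.com/coreos/poke/storage"
)

// serveAPI exposes an already-opened storage over the gRPC Storage service
// on the given address; it blocks until the server stops.
func serveAPI(s storage.Storage, addr string) error {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	srv := grpc.NewServer()
	apipb.RegisterStorageServer(srv, api.NewServer(s))
	return srv.Serve(lis)
}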

443
api/apipb/api.pb.go Normal file

@@ -0,0 +1,443 @@
// Code generated by protoc-gen-go.
// source: api/apipb/api.proto
// DO NOT EDIT!
/*
Package apipb is a generated protocol buffer package.
It is generated from these files:
api/apipb/api.proto
It has these top-level messages:
Client
CreateClientReq
CreateClientResp
UpdateClientReq
UpdateClientResp
ListClientsReq
ListClientsResp
DeleteClientReq
DeleteClientResp
GetClientReq
GetClientResp
*/
package apipb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Client struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Secret string `protobuf:"bytes,2,opt,name=secret" json:"secret,omitempty"`
RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris" json:"redirect_uris,omitempty"`
TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers" json:"trusted_peers,omitempty"`
Public bool `protobuf:"varint,5,opt,name=public" json:"public,omitempty"`
Name string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl" json:"logo_url,omitempty"`
}
func (m *Client) Reset() { *m = Client{} }
func (m *Client) String() string { return proto.CompactTextString(m) }
func (*Client) ProtoMessage() {}
func (*Client) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type CreateClientReq struct {
Client *Client `protobuf:"bytes,1,opt,name=client" json:"client,omitempty"`
}
func (m *CreateClientReq) Reset() { *m = CreateClientReq{} }
func (m *CreateClientReq) String() string { return proto.CompactTextString(m) }
func (*CreateClientReq) ProtoMessage() {}
func (*CreateClientReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *CreateClientReq) GetClient() *Client {
if m != nil {
return m.Client
}
return nil
}
type CreateClientResp struct {
Client *Client `protobuf:"bytes,1,opt,name=client" json:"client,omitempty"`
}
func (m *CreateClientResp) Reset() { *m = CreateClientResp{} }
func (m *CreateClientResp) String() string { return proto.CompactTextString(m) }
func (*CreateClientResp) ProtoMessage() {}
func (*CreateClientResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *CreateClientResp) GetClient() *Client {
if m != nil {
return m.Client
}
return nil
}
type UpdateClientReq struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
// Empty strings indicate that string fields should not be updated.
Secret string `protobuf:"bytes,2,opt,name=secret" json:"secret,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
LogoUrl string `protobuf:"bytes,4,opt,name=logo_url,json=logoUrl" json:"logo_url,omitempty"`
MakePublic bool `protobuf:"varint,5,opt,name=make_public,json=makePublic" json:"make_public,omitempty"`
MakePrivate bool `protobuf:"varint,6,opt,name=make_private,json=makePrivate" json:"make_private,omitempty"`
// If no redirect URIs are specified, the current redirect URIs are preserved.
RedirectUris []string `protobuf:"bytes,7,rep,name=redirect_uris,json=redirectUris" json:"redirect_uris,omitempty"`
}
func (m *UpdateClientReq) Reset() { *m = UpdateClientReq{} }
func (m *UpdateClientReq) String() string { return proto.CompactTextString(m) }
func (*UpdateClientReq) ProtoMessage() {}
func (*UpdateClientReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type UpdateClientResp struct {
Client *Client `protobuf:"bytes,1,opt,name=client" json:"client,omitempty"`
}
func (m *UpdateClientResp) Reset() { *m = UpdateClientResp{} }
func (m *UpdateClientResp) String() string { return proto.CompactTextString(m) }
func (*UpdateClientResp) ProtoMessage() {}
func (*UpdateClientResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *UpdateClientResp) GetClient() *Client {
if m != nil {
return m.Client
}
return nil
}
type ListClientsReq struct {
}
func (m *ListClientsReq) Reset() { *m = ListClientsReq{} }
func (m *ListClientsReq) String() string { return proto.CompactTextString(m) }
func (*ListClientsReq) ProtoMessage() {}
func (*ListClientsReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
type ListClientsResp struct {
Clients []*Client `protobuf:"bytes,1,rep,name=clients" json:"clients,omitempty"`
}
func (m *ListClientsResp) Reset() { *m = ListClientsResp{} }
func (m *ListClientsResp) String() string { return proto.CompactTextString(m) }
func (*ListClientsResp) ProtoMessage() {}
func (*ListClientsResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *ListClientsResp) GetClients() []*Client {
if m != nil {
return m.Clients
}
return nil
}
type DeleteClientReq struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
}
func (m *DeleteClientReq) Reset() { *m = DeleteClientReq{} }
func (m *DeleteClientReq) String() string { return proto.CompactTextString(m) }
func (*DeleteClientReq) ProtoMessage() {}
func (*DeleteClientReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type DeleteClientResp struct {
}
func (m *DeleteClientResp) Reset() { *m = DeleteClientResp{} }
func (m *DeleteClientResp) String() string { return proto.CompactTextString(m) }
func (*DeleteClientResp) ProtoMessage() {}
func (*DeleteClientResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
type GetClientReq struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
}
func (m *GetClientReq) Reset() { *m = GetClientReq{} }
func (m *GetClientReq) String() string { return proto.CompactTextString(m) }
func (*GetClientReq) ProtoMessage() {}
func (*GetClientReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
type GetClientResp struct {
Client *Client `protobuf:"bytes,1,opt,name=client" json:"client,omitempty"`
}
func (m *GetClientResp) Reset() { *m = GetClientResp{} }
func (m *GetClientResp) String() string { return proto.CompactTextString(m) }
func (*GetClientResp) ProtoMessage() {}
func (*GetClientResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *GetClientResp) GetClient() *Client {
if m != nil {
return m.Client
}
return nil
}
func init() {
proto.RegisterType((*Client)(nil), "apipb.Client")
proto.RegisterType((*CreateClientReq)(nil), "apipb.CreateClientReq")
proto.RegisterType((*CreateClientResp)(nil), "apipb.CreateClientResp")
proto.RegisterType((*UpdateClientReq)(nil), "apipb.UpdateClientReq")
proto.RegisterType((*UpdateClientResp)(nil), "apipb.UpdateClientResp")
proto.RegisterType((*ListClientsReq)(nil), "apipb.ListClientsReq")
proto.RegisterType((*ListClientsResp)(nil), "apipb.ListClientsResp")
proto.RegisterType((*DeleteClientReq)(nil), "apipb.DeleteClientReq")
proto.RegisterType((*DeleteClientResp)(nil), "apipb.DeleteClientResp")
proto.RegisterType((*GetClientReq)(nil), "apipb.GetClientReq")
proto.RegisterType((*GetClientResp)(nil), "apipb.GetClientResp")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion3
// Client API for Storage service
type StorageClient interface {
GetClient(ctx context.Context, in *GetClientReq, opts ...grpc.CallOption) (*GetClientResp, error)
CreateClient(ctx context.Context, in *CreateClientReq, opts ...grpc.CallOption) (*CreateClientResp, error)
UpdateClient(ctx context.Context, in *UpdateClientReq, opts ...grpc.CallOption) (*UpdateClientResp, error)
DeleteClient(ctx context.Context, in *DeleteClientReq, opts ...grpc.CallOption) (*DeleteClientResp, error)
ListClients(ctx context.Context, in *ListClientsReq, opts ...grpc.CallOption) (*ListClientsResp, error)
}
type storageClient struct {
cc *grpc.ClientConn
}
func NewStorageClient(cc *grpc.ClientConn) StorageClient {
return &storageClient{cc}
}
func (c *storageClient) GetClient(ctx context.Context, in *GetClientReq, opts ...grpc.CallOption) (*GetClientResp, error) {
out := new(GetClientResp)
err := grpc.Invoke(ctx, "/apipb.Storage/GetClient", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageClient) CreateClient(ctx context.Context, in *CreateClientReq, opts ...grpc.CallOption) (*CreateClientResp, error) {
out := new(CreateClientResp)
err := grpc.Invoke(ctx, "/apipb.Storage/CreateClient", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageClient) UpdateClient(ctx context.Context, in *UpdateClientReq, opts ...grpc.CallOption) (*UpdateClientResp, error) {
out := new(UpdateClientResp)
err := grpc.Invoke(ctx, "/apipb.Storage/UpdateClient", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageClient) DeleteClient(ctx context.Context, in *DeleteClientReq, opts ...grpc.CallOption) (*DeleteClientResp, error) {
out := new(DeleteClientResp)
err := grpc.Invoke(ctx, "/apipb.Storage/DeleteClient", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageClient) ListClients(ctx context.Context, in *ListClientsReq, opts ...grpc.CallOption) (*ListClientsResp, error) {
out := new(ListClientsResp)
err := grpc.Invoke(ctx, "/apipb.Storage/ListClients", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Storage service
type StorageServer interface {
GetClient(context.Context, *GetClientReq) (*GetClientResp, error)
CreateClient(context.Context, *CreateClientReq) (*CreateClientResp, error)
UpdateClient(context.Context, *UpdateClientReq) (*UpdateClientResp, error)
DeleteClient(context.Context, *DeleteClientReq) (*DeleteClientResp, error)
ListClients(context.Context, *ListClientsReq) (*ListClientsResp, error)
}
func RegisterStorageServer(s *grpc.Server, srv StorageServer) {
s.RegisterService(&_Storage_serviceDesc, srv)
}
func _Storage_GetClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetClientReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(StorageServer).GetClient(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/apipb.Storage/GetClient",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(StorageServer).GetClient(ctx, req.(*GetClientReq))
}
return interceptor(ctx, in, info, handler)
}
func _Storage_CreateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateClientReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(StorageServer).CreateClient(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/apipb.Storage/CreateClient",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(StorageServer).CreateClient(ctx, req.(*CreateClientReq))
}
return interceptor(ctx, in, info, handler)
}
func _Storage_UpdateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateClientReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(StorageServer).UpdateClient(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/apipb.Storage/UpdateClient",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(StorageServer).UpdateClient(ctx, req.(*UpdateClientReq))
}
return interceptor(ctx, in, info, handler)
}
func _Storage_DeleteClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteClientReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(StorageServer).DeleteClient(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/apipb.Storage/DeleteClient",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(StorageServer).DeleteClient(ctx, req.(*DeleteClientReq))
}
return interceptor(ctx, in, info, handler)
}
func _Storage_ListClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListClientsReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(StorageServer).ListClients(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/apipb.Storage/ListClients",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(StorageServer).ListClients(ctx, req.(*ListClientsReq))
}
return interceptor(ctx, in, info, handler)
}
var _Storage_serviceDesc = grpc.ServiceDesc{
ServiceName: "apipb.Storage",
HandlerType: (*StorageServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetClient",
Handler: _Storage_GetClient_Handler,
},
{
MethodName: "CreateClient",
Handler: _Storage_CreateClient_Handler,
},
{
MethodName: "UpdateClient",
Handler: _Storage_UpdateClient_Handler,
},
{
MethodName: "DeleteClient",
Handler: _Storage_DeleteClient_Handler,
},
{
MethodName: "ListClients",
Handler: _Storage_ListClients_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: fileDescriptor0,
}
func init() { proto.RegisterFile("api/apipb/api.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 459 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x8f, 0xd2, 0x40,
0x14, 0xa6, 0xc0, 0xb6, 0xf0, 0x28, 0x0b, 0x79, 0xab, 0x6b, 0xe5, 0xa0, 0xec, 0x18, 0x23, 0x27,
0x4c, 0xd6, 0xc4, 0xac, 0x1e, 0x8c, 0x66, 0x4d, 0xbc, 0x78, 0x20, 0x35, 0x9c, 0x49, 0xa1, 0x2f,
0x9b, 0x89, 0xdd, 0xed, 0x30, 0x33, 0xf8, 0xff, 0x3c, 0x7b, 0xf2, 0x1f, 0x99, 0xce, 0xb4, 0x4d,
0x07, 0x64, 0xc3, 0x85, 0xf0, 0xbe, 0xf9, 0xde, 0x7b, 0xf9, 0xbe, 0xef, 0xa5, 0x70, 0x91, 0x08,
0xfe, 0x36, 0x11, 0x5c, 0xac, 0x8b, 0xdf, 0xb9, 0x90, 0xb9, 0xce, 0xf1, 0xcc, 0x00, 0xec, 0xb7,
0x07, 0xfe, 0x6d, 0xc6, 0xe9, 0x41, 0xe3, 0x39, 0xb4, 0x79, 0x1a, 0x79, 0x53, 0x6f, 0xd6, 0x8f,
0xdb, 0x3c, 0xc5, 0x4b, 0xf0, 0x15, 0x6d, 0x24, 0xe9, 0xa8, 0x6d, 0xb0, 0xb2, 0xc2, 0x57, 0x30,
0x94, 0x94, 0x72, 0x49, 0x1b, 0xbd, 0xda, 0x49, 0xae, 0xa2, 0xce, 0xb4, 0x33, 0xeb, 0xc7, 0x61,
0x05, 0x2e, 0x25, 0x57, 0x05, 0x49, 0xcb, 0x9d, 0xd2, 0x94, 0xae, 0x04, 0x91, 0x54, 0x51, 0xd7,
0x92, 0x4a, 0x70, 0x51, 0x60, 0xc5, 0x06, 0xb1, 0x5b, 0x67, 0x7c, 0x13, 0x9d, 0x4d, 0xbd, 0x59,
0x2f, 0x2e, 0x2b, 0x44, 0xe8, 0x3e, 0x24, 0xf7, 0x14, 0xf9, 0x66, 0xaf, 0xf9, 0x8f, 0xcf, 0xa1,
0x97, 0xe5, 0x77, 0xf9, 0x6a, 0x27, 0xb3, 0x28, 0x30, 0x78, 0x50, 0xd4, 0x4b, 0x99, 0xb1, 0x1b,
0x18, 0xdd, 0x4a, 0x4a, 0x34, 0x59, 0x21, 0x31, 0x6d, 0xf1, 0x35, 0xf8, 0x1b, 0x53, 0x18, 0x3d,
0x83, 0xeb, 0xe1, 0xdc, 0xc8, 0x9d, 0x97, 0x8c, 0xf2, 0x91, 0x7d, 0x80, 0xb1, 0xdb, 0xa9, 0xc4,
0xa9, 0xad, 0x7f, 0x3d, 0x18, 0x2d, 0x45, 0xea, 0x6c, 0x3d, 0xd5, 0xc1, 0x4a, 0x5f, 0xe7, 0x88,
0xbe, 0xae, 0xa3, 0x0f, 0x5f, 0xc2, 0xe0, 0x3e, 0xf9, 0x49, 0x2b, 0xc7, 0x2b, 0x28, 0xa0, 0x85,
0xf5, 0xeb, 0x0a, 0x42, 0x4b, 0x90, 0xfc, 0x57, 0xa2, 0xad, 0x6f, 0xbd, 0xd8, 0x34, 0x2d, 0x2c,
0x74, 0x18, 0x5a, 0x70, 0x18, 0x5a, 0x61, 0x87, 0x2b, 0xe9, 0x74, 0x3b, 0xc6, 0x70, 0xfe, 0x9d,
0x2b, 0x6d, 0x51, 0x15, 0xd3, 0x96, 0x7d, 0x84, 0x91, 0x83, 0x28, 0x81, 0x6f, 0x20, 0xb0, 0x74,
0x15, 0x79, 0xd3, 0xce, 0xe1, 0xb0, 0xea, 0x95, 0x5d, 0xc1, 0xe8, 0x2b, 0x65, 0xf4, 0x88, 0xb7,
0x0c, 0x61, 0xec, 0x52, 0x94, 0x60, 0x2f, 0x20, 0xfc, 0x46, 0xfa, 0x78, 0xcf, 0x7b, 0x18, 0x36,
0xde, 0x4f, 0x16, 0x77, 0xfd, 0xa7, 0x0d, 0xc1, 0x0f, 0x9d, 0xcb, 0xe4, 0x8e, 0xf0, 0x06, 0xfa,
0xf5, 0x0c, 0xbc, 0x28, 0xf9, 0xcd, 0xad, 0x93, 0x27, 0x87, 0xa0, 0x12, 0xac, 0x85, 0x5f, 0x20,
0x6c, 0x1e, 0x1b, 0x5e, 0x56, 0xcb, 0xdc, 0xdb, 0x9d, 0x3c, 0xfb, 0x2f, 0x5e, 0x8d, 0x68, 0x06,
0x54, 0x8f, 0xd8, 0x3b, 0xc4, 0x7a, 0xc4, 0x7e, 0x9a, 0xac, 0x85, 0x9f, 0x21, 0x6c, 0xfa, 0x56,
0x8f, 0xd8, 0xf3, 0x7b, 0x72, 0x04, 0x67, 0x2d, 0xfc, 0x04, 0x83, 0x46, 0xb0, 0xf8, 0xb4, 0x24,
0xba, 0xf1, 0xd7, 0xfd, 0x7b, 0x37, 0xc0, 0x5a, 0x6b, 0xdf, 0x7c, 0x80, 0xde, 0xfd, 0x0b, 0x00,
0x00, 0xff, 0xff, 0x76, 0xa0, 0x50, 0x77, 0x97, 0x04, 0x00, 0x00,
}

74
api/apipb/api.proto Normal file

@@ -0,0 +1,74 @@
syntax = "proto3";
// Run `make grpc` at the top level directory to regenerate Go source code.
package apipb;
message Client {
string id = 1;
string secret = 2;
repeated string redirect_uris = 3;
repeated string trusted_peers = 4;
bool public = 5;
string name = 6;
string logo_url = 7;
}
message CreateClientReq {
Client client = 1;
}
message CreateClientResp {
Client client = 1;
}
message UpdateClientReq {
string id = 1;
// Empty strings indicate that string fields should not be updated.
string secret = 2;
string name = 3;
string logo_url = 4;
bool make_public = 5;
bool make_private = 6;
// If no redirect URIs are specified, the current redirect URIs are preserved.
repeated string redirect_uris = 7;
}
message UpdateClientResp {
Client client = 1;
}
message ListClientsReq {
}
message ListClientsResp {
repeated Client clients = 1;
}
message DeleteClientReq {
string id = 1;
}
message DeleteClientResp {}
message GetClientReq {
string id = 1;
}
message GetClientResp {
Client client = 1;
}
service Storage {
rpc CreateClient(CreateClientReq) returns (CreateClientResp) {}
rpc DeleteClient(DeleteClientReq) returns (DeleteClientResp) {}
rpc GetClient(GetClientReq) returns (GetClientResp) {}
rpc ListClients(ListClientsReq) returns (ListClientsResp) {}
rpc UpdateClient(UpdateClientReq) returns (UpdateClientResp) {}
}
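
For orientation, a sketch of driving this service from the generated Go client follows; it is not part of this commit, and the dial address, the insecure transport, and the example field values are assumptions for illustration only:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/coreos/poke/api/apipb"
)

func main() {
	// Assumed address; a real deployment would configure transport credentials
	// rather than grpc.WithInsecure().
	conn, err := grpc.Dial("127.0.0.1:5557", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := apipb.NewStorageClient(conn)
	resp, err := client.CreateClient(context.Background(), &apipb.CreateClientReq{
		Client: &apipb.Client{
			Name:         "example-app",
			RedirectUris: []string{"http://127.0.0.1:5555/callback"},
		},
	})
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	// The server generates the ID and secret when they are left empty.
	log.Printf("created client %q", resp.Client.Id)
}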

2
api/doc.go Normal file

@@ -0,0 +1,2 @@
// Package api implements a gRPC interface for interacting with a storage.
package api

24
glide.lock generated

@@ -1,5 +1,5 @@
-hash: 8b33b8abf5bca183ffa7108b03f4001f4d23f7f2e26f773f5bcb4bfefa51a26f
-updated: 2016-07-22T23:25:33.173188655-07:00
+hash: 66d8717b42e620d52ceec62e7cb3f2bc23f21bfe35547587338d85564bd330c9
+updated: 2016-07-31T12:19:29.812965492-07:00
 imports:
 - name: github.com/ericchiang/oidc
   version: 69fec81d167d815f4f455c741b2a94ffaf547ed2
@@ -7,6 +7,7 @@ imports:
   version: 874264fbbb43f4d91e999fecb4b40143ed611400
   subpackages:
   - proto
+  - protoc-gen-go
 - name: github.com/gorilla/context
   version: aed02d124ae4a0e94fea4541c8effd05bf0c8296
 - name: github.com/gorilla/mux
@@ -31,12 +32,18 @@ imports:
   - bcrypt
   - blowfish
 - name: golang.org/x/net
-  version: d7bf3545bb0dacf009c535b3d3fbf53ac0a339ab
+  version: 6a513affb38dc9788b449d59ffed099b8de18fa0
   subpackages:
   - context
+  - http2
+  - http2/hpack
+  - trace
+  - lex/httplex
+  - internal/timeseries
 - name: golang.org/x/oauth2
   version: 08c8d727d2392d18286f9f88ad775ad98f09ab33
   subpackages:
+  - github
   - internal
 - name: google.golang.org/appengine
   version: 267c27e7492265b84fc6719503b14a1e17975d79
@@ -48,6 +55,17 @@ imports:
   - internal/datastore
   - internal/log
   - internal/remote_api
+- name: google.golang.org/grpc
+  version: 13edeeffdea7a41d5aad96c28deb4c7bd01a9397
+  subpackages:
+  - codes
+  - credentials
+  - grpclog
+  - internal
+  - metadata
+  - naming
+  - transport
+  - peer
 - name: gopkg.in/asn1-ber.v1
   version: 4e86f4367175e39f69d9358a5f17b4dda270378d
 - name: gopkg.in/ldap.v2


@@ -1,5 +1,9 @@
+# NOTE(ericchiang): Create a separate block for each dependency so it's clear
+# which dependencies require which transitive dependencies.
+
 package: github.com/coreos/poke
+
 import:
 - package: github.com/spf13/cobra
   version: bc81c21bd0d8be5ba2d6630a505d79d4467566e7
 - package: github.com/spf13/pflag
@@ -21,9 +25,6 @@ import:
 - package: gopkg.in/yaml.v2
   version: a83829b6f1293c91addabc89d0571c246397bbf4

-- package: golang.org/x/net/context
-  version: d7bf3545bb0dacf009c535b3d3fbf53ac0a339ab
-
 - package: github.com/gorilla/mux
   version: 9fa818a44c2bf1396a17f9d5a3c0f6dd39d2ff8e
 - package: github.com/gorilla/context
@@ -44,6 +45,8 @@ import:
   version: v1.0.2
 - package: golang.org/x/oauth2
   version: 08c8d727d2392d18286f9f88ad775ad98f09ab33
+# Not actually imported but glide detects it. Consider adding subpackages to
+# the oauth2 package to eliminate.
 - package: google.golang.org/appengine
   version: 267c27e7492265b84fc6719503b14a1e17975d79
   subpackages:
@@ -54,10 +57,33 @@ import:
   - internal/datastore
   - internal/log
   - internal/remote_api

+# Go's protobuf generator is also a direct dependency of this repo.
 - package: github.com/golang/protobuf
   version: 874264fbbb43f4d91e999fecb4b40143ed611400
   subpackages:
   - proto
+  - protoc-gen-go
 - package: github.com/mitchellh/go-homedir
   verison: 756f7b183b7ab78acdbbee5c7f392838ed459dda
+- package: google.golang.org/grpc
+  version: v1.0.0
+  subpackages:
+  - codes
+  - credentials
+  - grpclog
+  - internal
+  - metadata
+  - naming
+  - transport
+  - peer
+
+- package: golang.org/x/net
+  version: 6a513affb38dc9788b449d59ffed099b8de18fa0
+  subpackages:
+  - context # context is also imported directly by this repo.
+  - http2
+  - http2/hpack
+  - trace
+  - lex/httplex
+  - internal/timeseries


@@ -15,6 +15,9 @@ const keysName = "openid-connect-keys"

 // Client is a mirrored struct from storage with JSON struct tags and
 // Kubernetes type metadata.
+//
+// TODO(ericchiang): Kubernetes has an extremely restricted set of characters it can use for IDs.
+// Consider base32ing client IDs.
 type Client struct {
 	k8sapi.TypeMeta   `json:",inline"`
 	k8sapi.ObjectMeta `json:"metadata,omitempty"`
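
The TODO above only records the problem; one possible direction, sketched here with a hypothetical helper (neither the name nor the encoding choice is part of this commit), is to base32-encode raw client IDs so they fit the restricted Kubernetes name alphabet:

package kubernetes

import (
	"encoding/base32"
	"strings"
)

// toK8sName maps an arbitrary client ID onto lowercase alphanumerics, the
// character set Kubernetes accepts for object names, by base32-encoding it,
// lowercasing the result, and dropping the '=' padding.
func toK8sName(id string) string {
	s := base32.StdEncoding.EncodeToString([]byte(id))
	return strings.ToLower(strings.TrimRight(s, "="))
}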


@@ -77,13 +77,14 @@ func Open(driverName string, config map[string]string) (Storage, error) {
 type Storage interface {
 	Close() error

+	// TODO(ericchiang): Let the storages set the IDs of these objects.
 	CreateAuthRequest(a AuthRequest) error
 	CreateClient(c Client) error
 	CreateAuthCode(c AuthCode) error
 	CreateRefresh(r Refresh) error

 	// TODO(ericchiang): return (T, bool, error) so we can indicate not found
-	// requests that way.
+	// requests that way instead of using ErrNotFound.
 	GetAuthRequest(id string) (AuthRequest, error)
 	GetAuthCode(id string) (AuthCode, error)
 	GetClient(id string) (Client, error)

3
vendor/golang.org/x/net/bpf/doc.go generated vendored

@@ -5,7 +5,8 @@
 /*
 Package bpf implements marshaling and unmarshaling of programs for the
-Berkeley Packet Filter virtual machine.
+Berkeley Packet Filter virtual machine, and provides a Go implementation
+of the virtual machine.

 BPF's main use is to specify a packet filter for network taps, so that
 the kernel doesn't have to expensively copy every packet it sees to
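
Since the doc comment now also advertises a Go implementation of the VM, here is a small sketch (not part of this commit; the program and packet bytes are made up for illustration) of running a filter entirely in Go:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/bpf"
)

func main() {
	// Accept up to 64 bytes of any packet whose first byte is 0x01,
	// otherwise drop it.
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 0, Size: 1},
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x01, SkipTrue: 1},
		bpf.RetConstant{Val: 0},  // drop
		bpf.RetConstant{Val: 64}, // accept
	})
	if err != nil {
		log.Fatalf("failed to load BPF program: %v", err)
	}

	out, err := vm.Run([]byte{0x01, 0x02, 0x03})
	if err != nil {
		log.Fatalf("failed to run program: %v", err)
	}
	fmt.Println("bytes accepted:", out) // prints 64 for a matching packet
}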

140
vendor/golang.org/x/net/bpf/vm.go generated vendored Normal file

@@ -0,0 +1,140 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
import (
"errors"
"fmt"
)
// A VM is an emulated BPF virtual machine.
type VM struct {
filter []Instruction
}
// NewVM returns a new VM using the input BPF program.
func NewVM(filter []Instruction) (*VM, error) {
if len(filter) == 0 {
return nil, errors.New("one or more Instructions must be specified")
}
for i, ins := range filter {
check := len(filter) - (i + 1)
switch ins := ins.(type) {
// Check for out-of-bounds jumps in instructions
case Jump:
if check <= int(ins.Skip) {
return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
}
case JumpIf:
if check <= int(ins.SkipTrue) {
return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
}
if check <= int(ins.SkipFalse) {
return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
}
// Check for division or modulus by zero
case ALUOpConstant:
if ins.Val != 0 {
break
}
switch ins.Op {
case ALUOpDiv, ALUOpMod:
return nil, errors.New("cannot divide by zero using ALUOpConstant")
}
// Check for unknown extensions
case LoadExtension:
switch ins.Num {
case ExtLen:
default:
return nil, fmt.Errorf("extension %d not implemented", ins.Num)
}
}
}
// Make sure last instruction is a return instruction
switch filter[len(filter)-1].(type) {
case RetA, RetConstant:
default:
return nil, errors.New("BPF program must end with RetA or RetConstant")
}
// Though our VM works using disassembled instructions, we
// attempt to assemble the input filter anyway to ensure it is compatible
// with an operating system VM.
_, err := Assemble(filter)
return &VM{
filter: filter,
}, err
}
// Run runs the VM's BPF program against the input bytes.
// Run returns the number of bytes accepted by the BPF program, and any errors
// which occurred while processing the program.
func (v *VM) Run(in []byte) (int, error) {
var (
// Registers of the virtual machine
regA uint32
regX uint32
regScratch [16]uint32
// OK is true if the program should continue processing the next
// instruction, or false if not, causing the loop to break
ok = true
)
// TODO(mdlayher): implement:
// - NegateA:
// - would require a change from uint32 registers to int32
// registers
// TODO(mdlayher): add interop tests that check signedness of ALU
// operations against kernel implementation, and make sure Go
// implementation matches behavior
for i := 0; i < len(v.filter) && ok; i++ {
ins := v.filter[i]
switch ins := ins.(type) {
case ALUOpConstant:
regA = aluOpConstant(ins, regA)
case ALUOpX:
regA, ok = aluOpX(ins, regA, regX)
case Jump:
i += int(ins.Skip)
case JumpIf:
jump := jumpIf(ins, regA)
i += jump
case LoadAbsolute:
regA, ok = loadAbsolute(ins, in)
case LoadConstant:
regA, regX = loadConstant(ins, regA, regX)
case LoadExtension:
regA = loadExtension(ins, in)
case LoadIndirect:
regA, ok = loadIndirect(ins, in, regX)
case LoadMemShift:
regX, ok = loadMemShift(ins, in)
case LoadScratch:
regA, regX = loadScratch(ins, regScratch, regA, regX)
case RetA:
return int(regA), nil
case RetConstant:
return int(ins.Val), nil
case StoreScratch:
regScratch = storeScratch(ins, regScratch, regA, regX)
case TAX:
regX = regA
case TXA:
regA = regX
default:
return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
}
}
return 0, nil
}

512
vendor/golang.org/x/net/bpf/vm_aluop_test.go generated vendored Normal file

@@ -0,0 +1,512 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"testing"
"golang.org/x/net/bpf"
)
func TestVMALUOpAdd(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpAdd,
Val: 3,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
8, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 3, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpSub(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.TAX{},
bpf.ALUOpX{
Op: bpf.ALUOpSub,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
1, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpMul(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpMul,
Val: 2,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
6, 2, 3, 4,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 4, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpDiv(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpDiv,
Val: 2,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
20, 2, 3, 4,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 2, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.ALUOpConstant{
Op: bpf.ALUOpDiv,
Val: 0,
},
bpf.RetA{},
})
if errStr(err) != "cannot divide by zero using ALUOpConstant" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMALUOpDivByZeroALUOpX(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
// Load byte 0 into X
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.TAX{},
// Load byte 1 into A
bpf.LoadAbsolute{
Off: 9,
Size: 1,
},
// Attempt to perform 1/0
bpf.ALUOpX{
Op: bpf.ALUOpDiv,
},
// Return 4 bytes if program does not terminate
bpf.LoadConstant{
Val: 12,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 3, 4,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpOr(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 2,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpOr,
Val: 0x01,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x00, 0x10, 0x03, 0x04,
0x05, 0x06, 0x07, 0x08,
0x09, 0xff,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 9, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpAnd(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 2,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpAnd,
Val: 0x0019,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xaa, 0x09,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpShiftLeft(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpShiftLeft,
Val: 0x01,
},
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 0x02,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x01, 0xaa,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpShiftRight(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpShiftRight,
Val: 0x01,
},
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 0x04,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x08, 0xff, 0xff,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpMod(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpMod,
Val: 20,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
30, 0, 0,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 2, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpModByZeroALUOpConstant(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpMod,
Val: 0,
},
bpf.RetA{},
})
if errStr(err) != "cannot divide by zero using ALUOpConstant" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMALUOpModByZeroALUOpX(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
// Load byte 0 into X
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.TAX{},
// Load byte 1 into A
bpf.LoadAbsolute{
Off: 9,
Size: 1,
},
// Attempt to perform 1%0
bpf.ALUOpX{
Op: bpf.ALUOpMod,
},
// Return 4 bytes if program does not terminate
bpf.LoadConstant{
Val: 12,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 3, 4,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpXor(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpXor,
Val: 0x0a,
},
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 0x01,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x0b, 0x00, 0x00, 0x00,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMALUOpUnknown(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.ALUOpConstant{
Op: bpf.ALUOpAdd,
Val: 1,
},
// Verify that an unknown operation is a no-op
bpf.ALUOpConstant{
Op: 100,
},
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 0x02,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
1,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}

192
vendor/golang.org/x/net/bpf/vm_bpf_test.go generated vendored Normal file

@@ -0,0 +1,192 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"net"
"runtime"
"testing"
"time"
"golang.org/x/net/bpf"
"golang.org/x/net/ipv4"
)
// A virtualMachine is a BPF virtual machine which can process an
// input packet against a BPF program and render a verdict.
type virtualMachine interface {
Run(in []byte) (int, error)
}
// canUseOSVM indicates if the OS BPF VM is available on this platform.
func canUseOSVM() bool {
// OS BPF VM can only be used on platforms where x/net/ipv4 supports
// attaching a BPF program to a socket.
switch runtime.GOOS {
case "linux":
return true
}
return false
}
// All BPF tests against both the Go VM and OS VM are assumed to
// be used with a UDP socket. As a result, the entire contents
// of a UDP datagram is sent through the BPF program, but only
// the body after the UDP header will ever be returned in output.
// testVM sets up a Go BPF VM, and if available, a native OS BPF VM
// for integration testing.
func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) {
goVM, err := bpf.NewVM(filter)
if err != nil {
// Some tests expect an error, so this error must be returned
// instead of fatally exiting the test
return nil, nil, err
}
mvm := &multiVirtualMachine{
goVM: goVM,
t: t,
}
// If available, add the OS VM for tests which verify that both the Go
// VM and OS VM have exactly the same output for the same input program
// and packet.
done := func() {}
if canUseOSVM() {
osVM, osVMDone := testOSVM(t, filter)
done = func() { osVMDone() }
mvm.osVM = osVM
}
return mvm, done, nil
}
// udpHeaderLen is the length of a UDP header.
const udpHeaderLen = 8
// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM
// and the native OS VM, if the OS VM is available.
type multiVirtualMachine struct {
goVM virtualMachine
osVM virtualMachine
t *testing.T
}
func (mvm *multiVirtualMachine) Run(in []byte) (int, error) {
if len(in) < udpHeaderLen {
mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d",
udpHeaderLen, len(in))
}
// All tests have a UDP header as part of input, because the OS VM
// packets always will. For the Go VM, this output is trimmed before
// being sent back to tests.
goOut, goErr := mvm.goVM.Run(in)
if goOut >= udpHeaderLen {
goOut -= udpHeaderLen
}
// If Go output is larger than the size of the packet, packet filtering
// interop tests must trim the output bytes to the length of the packet.
// The BPF VM should not do this on its own, as other uses of it do
// not trim the output byte count.
trim := len(in) - udpHeaderLen
if goOut > trim {
goOut = trim
}
// When the OS VM is not available, process using the Go VM alone
if mvm.osVM == nil {
return goOut, goErr
}
// The OS VM will apply its own UDP header, so remove the pseudo header
// that the Go VM needs.
osOut, err := mvm.osVM.Run(in[udpHeaderLen:])
if err != nil {
mvm.t.Fatalf("error while running OS VM: %v", err)
}
// Verify both VMs return same number of bytes
var mismatch bool
if goOut != osOut {
mismatch = true
mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut)
}
if mismatch {
mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match")
}
return goOut, goErr
}
// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for
// processing BPF programs.
type osVirtualMachine struct {
l net.PacketConn
s net.Conn
}
// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting
// packets into a UDP listener with a BPF program attached to it.
func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) {
l, err := net.ListenPacket("udp4", "127.0.0.1:0")
if err != nil {
t.Fatalf("failed to open OS VM UDP listener: %v", err)
}
prog, err := bpf.Assemble(filter)
if err != nil {
t.Fatalf("failed to compile BPF program: %v", err)
}
p := ipv4.NewPacketConn(l)
if err = p.SetBPF(prog); err != nil {
t.Fatalf("failed to attach BPF program to listener: %v", err)
}
s, err := net.Dial("udp4", l.LocalAddr().String())
if err != nil {
t.Fatalf("failed to dial connection to listener: %v", err)
}
done := func() {
_ = s.Close()
_ = l.Close()
}
return &osVirtualMachine{
l: l,
s: s,
}, done
}
// Run sends the input bytes into the OS's BPF VM and returns its verdict.
func (vm *osVirtualMachine) Run(in []byte) (int, error) {
go func() {
_, _ = vm.s.Write(in)
}()
vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond))
var b [512]byte
n, _, err := vm.l.ReadFrom(b[:])
if err != nil {
// A timeout indicates that BPF filtered out the packet, and thus,
// no input should be returned.
if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
return n, nil
}
return n, err
}
return n, nil
}

49
vendor/golang.org/x/net/bpf/vm_extension_test.go generated vendored Normal file

@@ -0,0 +1,49 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"testing"
"golang.org/x/net/bpf"
)
func TestVMLoadExtensionNotImplemented(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.LoadExtension{
Num: 100,
},
bpf.RetA{},
})
if errStr(err) != "extension 100 not implemented" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMLoadExtensionExtLen(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadExtension{
Num: bpf.ExtLen,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 4, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}

174
vendor/golang.org/x/net/bpf/vm_instructions.go generated vendored Normal file

@@ -0,0 +1,174 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
import (
"encoding/binary"
"fmt"
)
func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
return aluOpCommon(ins.Op, regA, ins.Val)
}
func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
// Guard against division or modulus by zero by terminating
// the program, as the OS BPF VM does
if regX == 0 {
switch ins.Op {
case ALUOpDiv, ALUOpMod:
return 0, false
}
}
return aluOpCommon(ins.Op, regA, regX), true
}
func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
switch op {
case ALUOpAdd:
return regA + value
case ALUOpSub:
return regA - value
case ALUOpMul:
return regA * value
case ALUOpDiv:
// Division by zero not permitted by NewVM and aluOpX checks
return regA / value
case ALUOpOr:
return regA | value
case ALUOpAnd:
return regA & value
case ALUOpShiftLeft:
return regA << value
case ALUOpShiftRight:
return regA >> value
case ALUOpMod:
// Modulus by zero not permitted by NewVM and aluOpX checks
return regA % value
case ALUOpXor:
return regA ^ value
default:
return regA
}
}
func jumpIf(ins JumpIf, value uint32) int {
var ok bool
inV := uint32(ins.Val)
switch ins.Cond {
case JumpEqual:
ok = value == inV
case JumpNotEqual:
ok = value != inV
case JumpGreaterThan:
ok = value > inV
case JumpLessThan:
ok = value < inV
case JumpGreaterOrEqual:
ok = value >= inV
case JumpLessOrEqual:
ok = value <= inV
case JumpBitsSet:
ok = (value & inV) != 0
case JumpBitsNotSet:
ok = (value & inV) == 0
}
if ok {
return int(ins.SkipTrue)
}
return int(ins.SkipFalse)
}
func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
offset := int(ins.Off)
size := int(ins.Size)
return loadCommon(in, offset, size)
}
func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
switch ins.Dst {
case RegA:
regA = ins.Val
case RegX:
regX = ins.Val
}
return regA, regX
}
func loadExtension(ins LoadExtension, in []byte) uint32 {
switch ins.Num {
case ExtLen:
return uint32(len(in))
default:
panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
}
}
func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
offset := int(ins.Off) + int(regX)
size := int(ins.Size)
return loadCommon(in, offset, size)
}
func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
offset := int(ins.Off)
if !inBounds(len(in), offset, 0) {
return 0, false
}
// Mask off high 4 bits and multiply low 4 bits by 4
return uint32(in[offset]&0x0f) * 4, true
}
func inBounds(inLen int, offset int, size int) bool {
return offset+size <= inLen
}
func loadCommon(in []byte, offset int, size int) (uint32, bool) {
if !inBounds(len(in), offset, size) {
return 0, false
}
switch size {
case 1:
return uint32(in[offset]), true
case 2:
return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
case 4:
return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
default:
panic(fmt.Sprintf("invalid load size: %d", size))
}
}
func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
switch ins.Dst {
case RegA:
regA = regScratch[ins.N]
case RegX:
regX = regScratch[ins.N]
}
return regA, regX
}
func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
switch ins.Src {
case RegA:
regScratch[ins.N] = regA
case RegX:
regScratch[ins.N] = regX
}
return regScratch
}

380
vendor/golang.org/x/net/bpf/vm_jump_test.go generated vendored Normal file

@@ -0,0 +1,380 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"testing"
"golang.org/x/net/bpf"
)
func TestVMJumpOne(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.Jump{
Skip: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
1,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpOutOfProgram(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.Jump{
Skip: 1,
},
bpf.RetA{},
})
if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMJumpIfTrueOutOfProgram(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.JumpIf{
Cond: bpf.JumpEqual,
SkipTrue: 2,
},
bpf.RetA{},
})
if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMJumpIfFalseOutOfProgram(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.JumpIf{
Cond: bpf.JumpEqual,
SkipFalse: 3,
},
bpf.RetA{},
})
if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMJumpIfEqual(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 1,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
1,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpIfNotEqual(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.JumpIf{
Cond: bpf.JumpNotEqual,
Val: 1,
SkipFalse: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
1,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpIfGreaterThan(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 4,
},
bpf.JumpIf{
Cond: bpf.JumpGreaterThan,
Val: 0x00010202,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 12,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 4, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpIfLessThan(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 4,
},
bpf.JumpIf{
Cond: bpf.JumpLessThan,
Val: 0xff010203,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 12,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 4, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpIfGreaterOrEqual(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 4,
},
bpf.JumpIf{
Cond: bpf.JumpGreaterOrEqual,
Val: 0x00010203,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 12,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 4, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpIfLessOrEqual(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 4,
},
bpf.JumpIf{
Cond: bpf.JumpLessOrEqual,
Val: 0xff010203,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 12,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 4, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpIfBitsSet(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 2,
},
bpf.JumpIf{
Cond: bpf.JumpBitsSet,
Val: 0x1122,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 10,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x01, 0x02,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 2, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMJumpIfBitsNotSet(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 2,
},
bpf.JumpIf{
Cond: bpf.JumpBitsNotSet,
Val: 0x1221,
SkipTrue: 1,
},
bpf.RetConstant{
Val: 0,
},
bpf.RetConstant{
Val: 10,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x01, 0x02,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 2, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
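The out-of-bounds cases above are enforced when the filter is loaded, not when it runs. As a small sketch separate from the vendored tests, the same check surfaces through the public bpf.NewVM API; the error string in the comment is the one the tests assert.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	// A JumpIf that would skip past the end of the program is rejected at
	// load time, which is what the *OutOfProgram tests above check.
	_, err := bpf.NewVM([]bpf.Instruction{
		bpf.JumpIf{Cond: bpf.JumpEqual, SkipTrue: 2},
		bpf.RetA{},
	})
	fmt.Println(err) // e.g. "cannot jump 2 instructions in true case; jumping past program bounds"
}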

246
vendor/golang.org/x/net/bpf/vm_load_test.go generated vendored Normal file

@ -0,0 +1,246 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"net"
"testing"
"golang.org/x/net/bpf"
"golang.org/x/net/ipv4"
)
func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 100,
Size: 2,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1, 2, 3,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 2,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Size: 5,
},
bpf.RetA{},
})
if errStr(err) != "assembling instruction 1: invalid load byte length 0" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMLoadConstantOK(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadConstant{
Dst: bpf.RegX,
Val: 9,
},
bpf.TXA{},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMLoadIndirectOutOfBounds(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadIndirect{
Off: 100,
Size: 1,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMLoadMemShiftOutOfBounds(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadMemShift{
Off: 100,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
const (
dhcp4Port = 53
)
func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) {
vm, in, done := testDHCPv4(t)
defer done()
// Append mostly empty UDP header with incorrect DHCPv4 port
in = append(in, []byte{
0, 0,
0, dhcp4Port + 1,
0, 0,
0, 0,
}...)
out, err := vm.Run(in)
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 0, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) {
vm, in, done := testDHCPv4(t)
defer done()
// Append mostly empty UDP header with correct DHCPv4 port
in = append(in, []byte{
0, 0,
0, dhcp4Port,
0, 0,
0, 0,
}...)
out, err := vm.Run(in)
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := len(in)-8, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) {
// DHCPv4 test data courtesy of David Anderson:
// https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70
vm, done, err := testVM(t, []bpf.Instruction{
// Load IPv4 packet length
bpf.LoadMemShift{Off: 8},
// Get UDP dport
bpf.LoadIndirect{Off: 8 + 2, Size: 2},
// Correct dport?
bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1},
// Accept
bpf.RetConstant{Val: 1500},
// Ignore
bpf.RetConstant{Val: 0},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
// Minimal requirements to make a valid IPv4 header
h := &ipv4.Header{
Len: ipv4.HeaderLen,
Src: net.IPv4(192, 168, 1, 1),
Dst: net.IPv4(192, 168, 1, 2),
}
hb, err := h.Marshal()
if err != nil {
t.Fatalf("failed to marshal IPv4 header: %v", err)
}
hb = append([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
}, hb...)
return vm, hb, done
}
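As a rough companion sketch (not part of the vendored file), the same destination-port filter can be assembled into raw instructions with bpf.Assemble. The offsets mirror the test's synthetic frame, which prepends 8 padding bytes before the IPv4 header, and the port value 67 is purely illustrative; the test above defines its own constant.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	const dstPort = 67 // illustrative value only

	prog := []bpf.Instruction{
		// X = length of the IPv4 header (the header starts 8 bytes into the frame).
		bpf.LoadMemShift{Off: 8},
		// A = UDP destination port, 2 bytes past the start of the UDP header.
		bpf.LoadIndirect{Off: 8 + 2, Size: 2},
		// Keep up to 1500 bytes when the port matches, otherwise drop.
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: dstPort, SkipFalse: 1},
		bpf.RetConstant{Val: 1500},
		bpf.RetConstant{Val: 0},
	}

	raw, err := bpf.Assemble(prog)
	if err != nil {
		fmt.Println("assemble failed:", err)
		return
	}
	fmt.Printf("assembled %d raw instructions\n", len(raw))
	// Attaching the raw program to a socket is platform-specific and not shown here.
}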

115
vendor/golang.org/x/net/bpf/vm_ret_test.go generated vendored Normal file

@ -0,0 +1,115 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"testing"
"golang.org/x/net/bpf"
)
func TestVMRetA(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
9,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMRetALargerThanInput(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadAbsolute{
Off: 8,
Size: 2,
},
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 255,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 2, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMRetConstant(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.RetConstant{
Val: 9,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 1, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMRetConstantLargerThanInput(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.RetConstant{
Val: 16,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0, 1,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 2, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}

247
vendor/golang.org/x/net/bpf/vm_scratch_test.go generated vendored Normal file

@ -0,0 +1,247 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"testing"
"golang.org/x/net/bpf"
)
func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.StoreScratch{
Src: bpf.RegA,
N: -1,
},
bpf.RetA{},
})
if errStr(err) != "assembling instruction 1: invalid scratch slot -1" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.StoreScratch{
Src: bpf.RegA,
N: 16,
},
bpf.RetA{},
})
if errStr(err) != "assembling instruction 1: invalid scratch slot 16" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMStoreScratchUnknownSourceRegister(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.StoreScratch{
Src: 100,
N: 0,
},
bpf.RetA{},
})
if errStr(err) != "assembling instruction 1: invalid source register 100" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.LoadScratch{
Dst: bpf.RegX,
N: -1,
},
bpf.RetA{},
})
if errStr(err) != "assembling instruction 1: invalid scratch slot -1" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.LoadScratch{
Dst: bpf.RegX,
N: 16,
},
bpf.RetA{},
})
if errStr(err) != "assembling instruction 1: invalid scratch slot 16" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.LoadScratch{
Dst: 100,
N: 0,
},
bpf.RetA{},
})
if errStr(err) != "assembling instruction 1: invalid target register 100" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMStoreScratchLoadScratchOneValue(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
// Load byte 255
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
// Copy to X and store in scratch[0]
bpf.TAX{},
bpf.StoreScratch{
Src: bpf.RegX,
N: 0,
},
// Load byte 1
bpf.LoadAbsolute{
Off: 9,
Size: 1,
},
// Overwrite 1 with 255 from scratch[0]
bpf.LoadScratch{
Dst: bpf.RegA,
N: 0,
},
// Return 255
bpf.RetA{},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
255, 1, 2,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 3, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}
func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
// Load byte 10
bpf.LoadAbsolute{
Off: 8,
Size: 1,
},
// Store in scratch[0]
bpf.StoreScratch{
Src: bpf.RegA,
N: 0,
},
// Load byte 20
bpf.LoadAbsolute{
Off: 9,
Size: 1,
},
// Store in scratch[1]
bpf.StoreScratch{
Src: bpf.RegA,
N: 1,
},
// Load byte 30
bpf.LoadAbsolute{
Off: 10,
Size: 1,
},
// Store in scratch[2]
bpf.StoreScratch{
Src: bpf.RegA,
N: 2,
},
// Load byte 1
bpf.LoadAbsolute{
Off: 11,
Size: 1,
},
// Store in scratch[3]
bpf.StoreScratch{
Src: bpf.RegA,
N: 3,
},
// Load in byte 10 to X
bpf.LoadScratch{
Dst: bpf.RegX,
N: 0,
},
// Copy X -> A
bpf.TXA{},
// Verify value is 10
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 10,
SkipTrue: 1,
},
// Fail test if incorrect
bpf.RetConstant{
Val: 0,
},
// Load in byte 20 to A
bpf.LoadScratch{
Dst: bpf.RegA,
N: 1,
},
// Verify value is 20
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 20,
SkipTrue: 1,
},
// Fail test if incorrect
bpf.RetConstant{
Val: 0,
},
// Load in byte 30 to A
bpf.LoadScratch{
Dst: bpf.RegA,
N: 2,
},
// Verify value is 30
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: 30,
SkipTrue: 1,
},
// Fail test if incorrect
bpf.RetConstant{
Val: 0,
},
// Return first two bytes on success
bpf.RetConstant{
Val: 10,
},
})
if err != nil {
t.Fatalf("failed to load BPF program: %v", err)
}
defer done()
out, err := vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
10, 20, 30, 1,
})
if err != nil {
t.Fatalf("unexpected error while running program: %v", err)
}
if want, got := 2, out; want != got {
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
want, got)
}
}

144
vendor/golang.org/x/net/bpf/vm_test.go generated vendored Normal file

@ -0,0 +1,144 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"fmt"
"testing"
"golang.org/x/net/bpf"
)
var _ bpf.Instruction = unknown{}
type unknown struct{}
func (unknown) Assemble() (bpf.RawInstruction, error) {
return bpf.RawInstruction{}, nil
}
func TestVMUnknownInstruction(t *testing.T) {
vm, done, err := testVM(t, []bpf.Instruction{
bpf.LoadConstant{
Dst: bpf.RegA,
Val: 100,
},
// Should terminate the program with an error immediately
unknown{},
bpf.RetA{},
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer done()
_, err = vm.Run([]byte{
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x00, 0x00,
})
if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" {
t.Fatalf("unexpected error while running program: %v", err)
}
}
func TestVMNoReturnInstruction(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{
bpf.LoadConstant{
Dst: bpf.RegA,
Val: 1,
},
})
if errStr(err) != "BPF program must end with RetA or RetConstant" {
t.Fatalf("unexpected error: %v", err)
}
}
func TestVMNoInputInstructions(t *testing.T) {
_, _, err := testVM(t, []bpf.Instruction{})
if errStr(err) != "one or more Instructions must be specified" {
t.Fatalf("unexpected error: %v", err)
}
}
// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame
// as input and checking its EtherType to determine if it should be accepted.
func ExampleNewVM() {
// Offset | Length | Comment
// -------------------------
// 00 | 06 | Ethernet destination MAC address
// 06 | 06 | Ethernet source MAC address
// 12 | 02 | Ethernet EtherType
const (
etOff = 12
etLen = 2
etARP = 0x0806
)
// Set up a VM to filter traffic based on if its EtherType
// matches the ARP EtherType.
vm, err := bpf.NewVM([]bpf.Instruction{
// Load EtherType value from Ethernet header
bpf.LoadAbsolute{
Off: etOff,
Size: etLen,
},
// If EtherType is equal to the ARP EtherType, jump to allow
// packet to be accepted
bpf.JumpIf{
Cond: bpf.JumpEqual,
Val: etARP,
SkipTrue: 1,
},
// EtherType does not match the ARP EtherType
bpf.RetConstant{
Val: 0,
},
// EtherType matches the ARP EtherType, accept up to 1500
// bytes of packet
bpf.RetConstant{
Val: 1500,
},
})
if err != nil {
panic(fmt.Sprintf("failed to load BPF program: %v", err))
}
// Create an Ethernet frame with the ARP EtherType for testing
frame := []byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
0x08, 0x06,
// Payload omitted for brevity
}
// Run our VM's BPF program using the Ethernet frame as input
out, err := vm.Run(frame)
if err != nil {
panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err))
}
// BPF VM can return a byte count greater than the number of input
// bytes, so trim the output to match the input byte length
if out > len(frame) {
out = len(frame)
}
fmt.Printf("out: %d bytes", out)
// Output:
// out: 14 bytes
}
// errStr returns the string representation of an error, or
// "<nil>" if it is nil.
func errStr(err error) string {
if err == nil {
return "<nil>"
}
return err.Error()
}


@ -1,7 +1,9 @@
// Copyright 2015 The Go Authors. All rights reserved. // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build go1.7
// Package ctxhttp provides helper functions for performing context-aware HTTP requests. // Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp" package ctxhttp // import "golang.org/x/net/context/ctxhttp"
@ -14,77 +16,28 @@ import (
"golang.org/x/net/context" "golang.org/x/net/context"
) )
func nop() {} // Do sends an HTTP request with the provided http.Client and returns
// an HTTP response.
var ( //
testHookContextDoneBeforeHeaders = nop
testHookDoReturned = nop
testHookDidBodyClose = nop
)
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used. // If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned. //
// The provided ctx must be non-nil. If it is canceled or times out,
// ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil { if client == nil {
client = http.DefaultClient client = http.DefaultClient
} }
resp, err := client.Do(req.WithContext(ctx))
// TODO(djd): Respect any existing value of req.Cancel. // If we got an error, and the context has been canceled,
cancel := make(chan struct{}) // the context's error is probably more useful.
req.Cancel = cancel if err != nil {
type responseAndError struct {
resp *http.Response
err error
}
result := make(chan responseAndError, 1)
// Make local copies of test hooks closed over by goroutines below.
// Prevents data races in tests.
testHookDoReturned := testHookDoReturned
testHookDidBodyClose := testHookDidBodyClose
go func() {
resp, err := client.Do(req)
testHookDoReturned()
result <- responseAndError{resp, err}
}()
var resp *http.Response
select {
case <-ctx.Done():
testHookContextDoneBeforeHeaders()
close(cancel)
// Clean up after the goroutine calling client.Do:
go func() {
if r := <-result; r.resp != nil {
testHookDidBodyClose()
r.resp.Body.Close()
}
}()
return nil, ctx.Err()
case r := <-result:
var err error
resp, err = r.resp, r.err
if err != nil {
return resp, err
}
}
c := make(chan struct{})
go func() {
select { select {
case <-ctx.Done(): case <-ctx.Done():
close(cancel) err = ctx.Err()
case <-c: default:
// The response's Body is closed.
} }
}() }
resp.Body = &notifyingReader{resp.Body, c} return resp, err
return resp, nil
} }
// Get issues a GET request via the Do function. // Get issues a GET request via the Do function.
@ -119,28 +72,3 @@ func Post(ctx context.Context, client *http.Client, url string, bodyType string,
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
} }
// notifyingReader is an io.ReadCloser that closes the notify channel after
// Close is called or a Read fails on the underlying ReadCloser.
type notifyingReader struct {
io.ReadCloser
notify chan<- struct{}
}
func (r *notifyingReader) Read(p []byte) (int, error) {
n, err := r.ReadCloser.Read(p)
if err != nil && r.notify != nil {
close(r.notify)
r.notify = nil
}
return n, err
}
func (r *notifyingReader) Close() error {
err := r.ReadCloser.Close()
if r.notify != nil {
close(r.notify)
r.notify = nil
}
return err
}
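For orientation, a minimal caller-side sketch of this package (the URL is a placeholder). On Go 1.7+ the request is canceled through req.WithContext as in the rewritten Do above, and a hit deadline surfaces as ctx.Err().

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// Give the whole request/response cycle two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// A nil client means http.DefaultClient is used.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(body))
}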


@ -0,0 +1,28 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9,go1.7
package ctxhttp
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"context"
)
func TestGo17Context(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, "ok")
}))
ctx := context.Background()
resp, err := Get(ctx, http.DefaultClient, ts.URL)
if resp == nil || err != nil {
t.Fatalf("error received from client: %v %v", err, resp)
}
resp.Body.Close()
}


@ -0,0 +1,147 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
import (
"io"
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
)
func nop() {}
var (
testHookContextDoneBeforeHeaders = nop
testHookDoReturned = nop
testHookDidBodyClose = nop
)
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
// TODO(djd): Respect any existing value of req.Cancel.
cancel := make(chan struct{})
req.Cancel = cancel
type responseAndError struct {
resp *http.Response
err error
}
result := make(chan responseAndError, 1)
// Make local copies of test hooks closed over by goroutines below.
// Prevents data races in tests.
testHookDoReturned := testHookDoReturned
testHookDidBodyClose := testHookDidBodyClose
go func() {
resp, err := client.Do(req)
testHookDoReturned()
result <- responseAndError{resp, err}
}()
var resp *http.Response
select {
case <-ctx.Done():
testHookContextDoneBeforeHeaders()
close(cancel)
// Clean up after the goroutine calling client.Do:
go func() {
if r := <-result; r.resp != nil {
testHookDidBodyClose()
r.resp.Body.Close()
}
}()
return nil, ctx.Err()
case r := <-result:
var err error
resp, err = r.resp, r.err
if err != nil {
return resp, err
}
}
c := make(chan struct{})
go func() {
select {
case <-ctx.Done():
close(cancel)
case <-c:
// The response's Body is closed.
}
}()
resp.Body = &notifyingReader{resp.Body, c}
return resp, nil
}
// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return Do(ctx, client, req)
}
// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
// notifyingReader is an io.ReadCloser that closes the notify channel after
// Close is called or a Read fails on the underlying ReadCloser.
type notifyingReader struct {
io.ReadCloser
notify chan<- struct{}
}
func (r *notifyingReader) Read(p []byte) (int, error) {
n, err := r.ReadCloser.Read(p)
if err != nil && r.notify != nil {
close(r.notify)
r.notify = nil
}
return n, err
}
func (r *notifyingReader) Close() error {
err := r.ReadCloser.Close()
if r.notify != nil {
close(r.notify)
r.notify = nil
}
return err
}


@ -0,0 +1,79 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9,!go1.7
package ctxhttp
import (
"net"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"golang.org/x/net/context"
)
// golang.org/issue/14065
func TestClosesResponseBodyOnCancel(t *testing.T) {
defer func() { testHookContextDoneBeforeHeaders = nop }()
defer func() { testHookDoReturned = nop }()
defer func() { testHookDidBodyClose = nop }()
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
defer ts.Close()
ctx, cancel := context.WithCancel(context.Background())
// closed when Do enters select case <-ctx.Done()
enteredDonePath := make(chan struct{})
testHookContextDoneBeforeHeaders = func() {
close(enteredDonePath)
}
testHookDoReturned = func() {
// We now have the result (the Flush'd headers) at least,
// so we can cancel the request.
cancel()
// But block the client.Do goroutine from sending
// until Do enters into the <-ctx.Done() path, since
// otherwise if both channels are readable, select
// picks a random one.
<-enteredDonePath
}
sawBodyClose := make(chan struct{})
testHookDidBodyClose = func() { close(sawBodyClose) }
tr := &http.Transport{}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
req, _ := http.NewRequest("GET", ts.URL, nil)
_, doErr := Do(ctx, c, req)
select {
case <-sawBodyClose:
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for body to close")
}
if doErr != ctx.Err() {
t.Errorf("Do error = %v; want %v", doErr, ctx.Err())
}
}
type noteCloseConn struct {
net.Conn
onceClose sync.Once
closefn func()
}
func (c *noteCloseConn) Close() error {
c.onceClose.Do(c.closefn)
return c.Conn.Close()
}


@ -7,11 +7,10 @@
package ctxhttp package ctxhttp
import ( import (
"io"
"io/ioutil" "io/ioutil"
"net"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"sync"
"testing" "testing"
"time" "time"
@ -23,59 +22,62 @@ const (
requestBody = "ok" requestBody = "ok"
) )
func okHandler(w http.ResponseWriter, r *http.Request) {
time.Sleep(requestDuration)
io.WriteString(w, requestBody)
}
func TestNoTimeout(t *testing.T) { func TestNoTimeout(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(okHandler))
defer ts.Close()
ctx := context.Background() ctx := context.Background()
resp, err := doRequest(ctx) res, err := Get(ctx, nil, ts.URL)
if err != nil {
if resp == nil || err != nil { t.Fatal(err)
t.Fatalf("error received from client: %v %v", err, resp) }
defer res.Body.Close()
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(slurp) != requestBody {
t.Errorf("body = %q; want %q", slurp, requestBody)
} }
} }
func TestCancel(t *testing.T) { func TestCancelBeforeHeaders(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
go func() {
time.Sleep(requestDuration / 2) blockServer := make(chan struct{})
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
cancel() cancel()
}() <-blockServer
io.WriteString(w, requestBody)
}))
defer ts.Close()
defer close(blockServer)
resp, err := doRequest(ctx) res, err := Get(ctx, nil, ts.URL)
if err == nil {
if resp != nil || err == nil { res.Body.Close()
t.Fatalf("expected error, didn't get one. resp: %v", resp) t.Fatal("Get returned unexpected nil error")
} }
if err != ctx.Err() { if err != context.Canceled {
t.Fatalf("expected error from context but got: %v", err) t.Errorf("err = %v; want %v", err, context.Canceled)
}
}
func TestCancelAfterRequest(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
resp, err := doRequest(ctx)
// Cancel before reading the body.
// Request.Body should still be readable after the context is canceled.
cancel()
b, err := ioutil.ReadAll(resp.Body)
if err != nil || string(b) != requestBody {
t.Fatalf("could not read body: %q %v", b, err)
} }
} }
func TestCancelAfterHangingRequest(t *testing.T) { func TestCancelAfterHangingRequest(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
w.(http.Flusher).Flush() w.(http.Flusher).Flush()
<-w.(http.CloseNotifier).CloseNotify() <-w.(http.CloseNotifier).CloseNotify()
}) }))
defer ts.Close()
serv := httptest.NewServer(handler)
defer serv.Close()
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
resp, err := Get(ctx, nil, serv.URL) resp, err := Get(ctx, nil, ts.URL)
if err != nil { if err != nil {
t.Fatalf("unexpected error in Get: %v", err) t.Fatalf("unexpected error in Get: %v", err)
} }
@ -101,76 +103,3 @@ func TestCancelAfterHangingRequest(t *testing.T) {
case <-done: case <-done:
} }
} }
func doRequest(ctx context.Context) (*http.Response, error) {
var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(requestDuration)
w.Write([]byte(requestBody))
})
serv := httptest.NewServer(okHandler)
defer serv.Close()
return Get(ctx, nil, serv.URL)
}
// golang.org/issue/14065
func TestClosesResponseBodyOnCancel(t *testing.T) {
defer func() { testHookContextDoneBeforeHeaders = nop }()
defer func() { testHookDoReturned = nop }()
defer func() { testHookDidBodyClose = nop }()
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
defer ts.Close()
ctx, cancel := context.WithCancel(context.Background())
// closed when Do enters select case <-ctx.Done()
enteredDonePath := make(chan struct{})
testHookContextDoneBeforeHeaders = func() {
close(enteredDonePath)
}
testHookDoReturned = func() {
// We now have the result (the Flush'd headers) at least,
// so we can cancel the request.
cancel()
// But block the client.Do goroutine from sending
// until Do enters into the <-ctx.Done() path, since
// otherwise if both channels are readable, select
// picks a random one.
<-enteredDonePath
}
sawBodyClose := make(chan struct{})
testHookDidBodyClose = func() { close(sawBodyClose) }
tr := &http.Transport{}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
req, _ := http.NewRequest("GET", ts.URL, nil)
_, doErr := Do(ctx, c, req)
select {
case <-sawBodyClose:
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for body to close")
}
if doErr != ctx.Err() {
t.Errorf("Do error = %v; want %v", doErr, ctx.Err())
}
}
type noteCloseConn struct {
net.Conn
onceClose sync.Once
closefn func()
}
func (c *noteCloseConn) Close() error {
c.onceClose.Do(c.closefn)
return c.Conn.Close()
}


@ -52,7 +52,16 @@ const (
noDialOnMiss = false noDialOnMiss = false
) )
func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
if err != nil {
return nil, err
}
return cc, nil
}
p.mu.Lock() p.mu.Lock()
for _, cc := range p.conns[addr] { for _, cc := range p.conns[addr] {
if cc.CanTakeNewRequest() { if cc.CanTakeNewRequest() {
@ -95,7 +104,8 @@ func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
// run in its own goroutine. // run in its own goroutine.
func (c *dialCall) dial(addr string) { func (c *dialCall) dial(addr string) {
c.res, c.err = c.p.t.dialClientConn(addr) const singleUse = false // shared conn
c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
close(c.done) close(c.done)
c.p.mu.Lock() c.p.mu.Lock()


@ -454,7 +454,7 @@ func terminalReadFrameError(err error) bool {
// //
// If the frame is larger than previously set with SetMaxReadFrameSize, the // If the frame is larger than previously set with SetMaxReadFrameSize, the
// returned error is ErrFrameTooLarge. Other errors may be of type // returned error is ErrFrameTooLarge. Other errors may be of type
// ConnectionError, StreamError, or anything else from from the underlying // ConnectionError, StreamError, or anything else from the underlying
// reader. // reader.
func (fr *Framer) ReadFrame() (Frame, error) { func (fr *Framer) ReadFrame() (Frame, error) {
fr.errDetail = nil fr.errDetail = nil


@ -7,6 +7,7 @@
package http2 package http2
import ( import (
"crypto/tls"
"net/http" "net/http"
"time" "time"
) )
@ -14,3 +15,29 @@ import (
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return t1.ExpectContinueTimeout return t1.ExpectContinueTimeout
} }
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}
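isBadCipher is unexported, so server code cannot call it directly. A hedged caller-side sketch of a TLS configuration that stays off the Appendix A blacklist by pinning an ephemeral AEAD suite; the package name is illustrative and the cipher constants are standard crypto/tls names, not part of this diff.

package h2config

import "crypto/tls"

// NewH2TLSConfig is an illustrative helper: it restricts the server to a
// cipher suite that RFC 7540 Appendix A does not blacklist (ECDHE key
// exchange with AES-GCM) and advertises h2 via ALPN.
func NewH2TLSConfig(cert tls.Certificate) *tls.Config {
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		MinVersion:   tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		},
		NextProtos: []string{"h2"},
	}
}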


@ -7,6 +7,7 @@
package http2 package http2
import ( import (
"crypto/tls"
"net/http" "net/http"
"time" "time"
) )
@ -17,4 +18,29 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return 0 return 0
}
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
} }


@ -339,30 +339,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.serve() sc.serve()
} }
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}
func (sc *serverConn) rejectConn(err ErrCode, debug string) { func (sc *serverConn) rejectConn(err ErrCode, debug string) {
sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
// ignoring errors. hanging up anyway. // ignoring errors. hanging up anyway.
@ -1200,6 +1176,10 @@ func (sc *serverConn) closeStream(st *stream, err error) {
} }
delete(sc.streams, st.id) delete(sc.streams, st.id)
if p := st.body; p != nil { if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
// See golang.org/issue/16481
sc.sendWindowUpdate(nil, p.Len())
p.CloseWithError(err) p.CloseWithError(err)
} }
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
@ -1301,6 +1281,8 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
func (sc *serverConn) processData(f *DataFrame) error { func (sc *serverConn) processData(f *DataFrame) error {
sc.serveG.check() sc.serveG.check()
data := f.Data()
// "If a DATA frame is received whose stream is not in "open" // "If a DATA frame is received whose stream is not in "open"
// or "half closed (local)" state, the recipient MUST respond // or "half closed (local)" state, the recipient MUST respond
// with a stream error (Section 5.4.2) of type STREAM_CLOSED." // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
@ -1312,12 +1294,25 @@ func (sc *serverConn) processData(f *DataFrame) error {
// the http.Handler returned, so it's done reading & // the http.Handler returned, so it's done reading &
// done writing). Try to stop the client from sending // done writing). Try to stop the client from sending
// more DATA. // more DATA.
// But still enforce their connection-level flow control,
// and return any flow control bytes since we're not going
// to consume them.
if int(sc.inflow.available()) < len(data) {
return StreamError{id, ErrCodeFlowControl}
}
// Deduct the flow control from inflow, since we're
// going to immediately add it back in
// sendWindowUpdate, which also schedules sending the
// frames.
sc.inflow.take(int32(len(data)))
sc.sendWindowUpdate(nil, len(data)) // conn-level
return StreamError{id, ErrCodeStreamClosed} return StreamError{id, ErrCodeStreamClosed}
} }
if st.body == nil { if st.body == nil {
panic("internal error: should have a body in this state") panic("internal error: should have a body in this state")
} }
data := f.Data()
// Sender sending more than they'd declared? // Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {


@ -104,6 +104,8 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
case optQuiet: case optQuiet:
quiet = true quiet = true
} }
case func(net.Conn, http.ConnState):
ts.Config.ConnState = v
default: default:
t.Fatalf("unknown newServerTester option type %T", v) t.Fatalf("unknown newServerTester option type %T", v)
} }
@ -2165,6 +2167,9 @@ func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
// it did before. // it did before.
st.writeData(1, true, []byte("foo")) st.writeData(1, true, []byte("foo"))
// Get our flow control bytes back, since the handler didn't get them.
st.wantWindowUpdate(0, uint32(len("foo")))
// Sent after a peer sends data anyway (admittedly the // Sent after a peer sends data anyway (admittedly the
// previous RST_STREAM might've still been in-flight), // previous RST_STREAM might've still been in-flight),
// but they'll get the more friendly 'cancel' code // but they'll get the more friendly 'cancel' code
@ -3299,3 +3304,43 @@ func TestExpect100ContinueAfterHandlerWrites(t *testing.T) {
t.Fatalf("second msg = %q; want %q", buf, msg2) t.Fatalf("second msg = %q; want %q", buf, msg2)
} }
} }
type funcReader func([]byte) (n int, err error)
func (f funcReader) Read(p []byte) (n int, err error) { return f(p) }
// golang.org/issue/16481 -- return flow control when streams close with unread data.
// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport)
func TestUnreadFlowControlReturned_Server(t *testing.T) {
unblock := make(chan bool, 1)
defer close(unblock)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
// Don't read the 16KB request body. Wait until the client's
// done sending it and then return. This should cause the Server
// to then return those 16KB of flow control to the client.
<-unblock
}, optOnlyServer)
defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
// This previously hung on the 4th iteration.
for i := 0; i < 6; i++ {
body := io.MultiReader(
io.LimitReader(neverEnding('A'), 16<<10),
funcReader(func([]byte) (n int, err error) {
unblock <- true
return 0, io.EOF
}),
)
req, _ := http.NewRequest("POST", st.ts.URL, body)
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
}
}


@ -139,9 +139,10 @@ func (t *Transport) initConnPool() {
// ClientConn is the state of a single HTTP/2 client connection to an // ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server. // HTTP/2 server.
type ClientConn struct { type ClientConn struct {
t *Transport t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls tlsState *tls.ConnectionState // nil only for specialized impls
singleUse bool // whether being used for a single http.Request
// readLoop goroutine fields: // readLoop goroutine fields:
readerDone chan struct{} // closed on error readerDone chan struct{} // closed on error
@ -153,6 +154,7 @@ type ClientConn struct {
inflow flow // peer's conn-level flow control inflow flow // peer's conn-level flow control
closed bool closed bool
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated streams map[uint32]*clientStream // client-initiated
nextStreamID uint32 nextStreamID uint32
bw *bufio.Writer bw *bufio.Writer
@ -337,7 +339,7 @@ func shouldRetryRequest(req *http.Request, err error) bool {
return err == errClientConnUnusable return err == errClientConnUnusable
} }
func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
host, _, err := net.SplitHostPort(addr) host, _, err := net.SplitHostPort(addr)
if err != nil { if err != nil {
return nil, err return nil, err
@ -346,7 +348,7 @@ func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return t.NewClientConn(tconn) return t.newClientConn(tconn, singleUse)
} }
func (t *Transport) newTLSConfig(host string) *tls.Config { func (t *Transport) newTLSConfig(host string) *tls.Config {
@ -407,6 +409,10 @@ func (t *Transport) expectContinueTimeout() time.Duration {
} }
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, false)
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
if VerboseLogs { if VerboseLogs {
t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
} }
@ -424,6 +430,7 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
initialWindowSize: 65535, // spec default initialWindowSize: 65535, // spec default
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
streams: make(map[uint32]*clientStream), streams: make(map[uint32]*clientStream),
singleUse: singleUse,
} }
cc.cond = sync.NewCond(&cc.mu) cc.cond = sync.NewCond(&cc.mu)
cc.flow.add(int32(initialWindowSize)) cc.flow.add(int32(initialWindowSize))
@ -494,7 +501,17 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
func (cc *ClientConn) setGoAway(f *GoAwayFrame) { func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
cc.mu.Lock() cc.mu.Lock()
defer cc.mu.Unlock() defer cc.mu.Unlock()
old := cc.goAway
cc.goAway = f cc.goAway = f
// Merge the previous and current GoAway error frames.
if cc.goAwayDebug == "" {
cc.goAwayDebug = string(f.DebugData())
}
if old != nil && old.ErrCode != ErrCodeNo {
cc.goAway.ErrCode = old.ErrCode
}
} }
func (cc *ClientConn) CanTakeNewRequest() bool { func (cc *ClientConn) CanTakeNewRequest() bool {
@ -504,6 +521,9 @@ func (cc *ClientConn) CanTakeNewRequest() bool {
} }
func (cc *ClientConn) canTakeNewRequestLocked() bool { func (cc *ClientConn) canTakeNewRequestLocked() bool {
if cc.singleUse && cc.nextStreamID > 1 {
return false
}
return cc.goAway == nil && !cc.closed && return cc.goAway == nil && !cc.closed &&
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
cc.nextStreamID < 2147483647 cc.nextStreamID < 2147483647
@ -732,30 +752,34 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
bodyWritten := false bodyWritten := false
ctx := reqContext(req) ctx := reqContext(req)
handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
res := re.res
if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc abort any
// ongoing write, assuming that the server doesn't care
// about our request body. If the server replied with 1xx or
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
cc.forgetStreamID(cs.ID)
return nil, re.err
}
res.Request = req
res.TLS = cc.tlsState
return res, nil
}
for { for {
select { select {
case re := <-readLoopResCh: case re := <-readLoopResCh:
res := re.res return handleReadLoopResponse(re)
if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc abort any
// ongoing write, assuming that the server doesn't care
// about our request body. If the server replied with 1xx or
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
cc.forgetStreamID(cs.ID)
return nil, re.err
}
res.Request = req
res.TLS = cc.tlsState
return res, nil
case <-respHeaderTimer: case <-respHeaderTimer:
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten { if !hasBody || bodyWritten {
@ -789,6 +813,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// forgetStreamID. // forgetStreamID.
return nil, cs.resetErr return nil, cs.resetErr
case err := <-bodyWriter.resc: case err := <-bodyWriter.resc:
// Prefer the read loop's response, if available. Issue 16102.
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
default:
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1160,6 +1190,19 @@ func (cc *ClientConn) readLoop() {
} }
} }
// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type GoAwayError struct {
LastStreamID uint32
ErrCode ErrCode
DebugData string
}
func (e GoAwayError) Error() string {
return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
e.LastStreamID, e.ErrCode, e.DebugData)
}
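A small sketch of how a caller might inspect the new error type just added above; the host is a placeholder, and the type assertion relies only on GoAwayError being returned unwrapped, as the cleanup path below and the accompanying tests arrange.

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{}
	req, _ := http.NewRequest("GET", "https://example.com/", nil)

	_, err := tr.RoundTrip(req)
	// When the server sends GOAWAY and then closes the TCP connection, the
	// error now carries the frame's details instead of a bare unexpected EOF.
	if ga, ok := err.(http2.GoAwayError); ok {
		fmt.Printf("server went away: code=%v last-stream=%d debug=%q\n",
			ga.ErrCode, ga.LastStreamID, ga.DebugData)
	} else if err != nil {
		fmt.Println("round trip failed:", err)
	}
}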
func (rl *clientConnReadLoop) cleanup() { func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc cc := rl.cc
defer cc.tconn.Close() defer cc.tconn.Close()
@ -1170,10 +1213,18 @@ func (rl *clientConnReadLoop) cleanup() {
// TODO: also do this if we've written the headers but not // TODO: also do this if we've written the headers but not
// gotten a response yet. // gotten a response yet.
err := cc.readerErr err := cc.readerErr
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
cc.mu.Lock() cc.mu.Lock()
if err == io.EOF {
if cc.goAway != nil {
err = GoAwayError{
LastStreamID: cc.goAway.LastStreamID,
ErrCode: cc.goAway.ErrCode,
DebugData: cc.goAwayDebug,
}
} else {
err = io.ErrUnexpectedEOF
}
}
for _, cs := range rl.activeRes { for _, cs := range rl.activeRes {
cs.bufPipe.CloseWithError(err) cs.bufPipe.CloseWithError(err)
} }
@ -1191,7 +1242,7 @@ func (rl *clientConnReadLoop) cleanup() {
func (rl *clientConnReadLoop) run() error { func (rl *clientConnReadLoop) run() error {
cc := rl.cc cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a reply gotReply := false // ever saw a reply
for { for {
f, err := cc.fr.ReadFrame() f, err := cc.fr.ReadFrame()
@ -1486,10 +1537,27 @@ var errClosedResponseBody = errors.New("http2: response body closed")
func (b transportResponseBody) Close() error { func (b transportResponseBody) Close() error {
cs := b.cs cs := b.cs
if cs.bufPipe.Err() != io.EOF { cc := cs.cc
// TODO: write test for this
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
unread := cs.bufPipe.Len()
if unread > 0 || !serverSentStreamEnd {
cc.mu.Lock()
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
}
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
cc.mu.Unlock()
} }
cs.bufPipe.BreakWithError(errClosedResponseBody) cs.bufPipe.BreakWithError(errClosedResponseBody)
return nil return nil
} }
@ -1497,6 +1565,7 @@ func (b transportResponseBody) Close() error {
func (rl *clientConnReadLoop) processData(f *DataFrame) error { func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded()) cs := cc.streamByID(f.StreamID, f.StreamEnded())
data := f.Data()
if cs == nil { if cs == nil {
cc.mu.Lock() cc.mu.Lock()
neverSent := cc.nextStreamID neverSent := cc.nextStreamID
@ -1510,9 +1579,17 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// TODO: be stricter here? only silently ignore things which // TODO: be stricter here? only silently ignore things which
// we canceled, but not things which were closed normally // we canceled, but not things which were closed normally
// by the peer? Tough without accumulating too much state. // by the peer? Tough without accumulating too much state.
// But at least return their flow control:
if len(data) > 0 {
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(len(data)))
cc.bw.Flush()
cc.wmu.Unlock()
}
return nil return nil
} }
if data := f.Data(); len(data) > 0 { if len(data) > 0 {
if cs.bufPipe.b == nil { if cs.bufPipe.b == nil {
// Data frame after it's already closed? // Data frame after it's already closed?
cc.logf("http2: Transport received DATA frame for closed stream; closing connection") cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
@ -1557,7 +1634,7 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
} }
cs.bufPipe.closeWithErrorAndCode(err, code) cs.bufPipe.closeWithErrorAndCode(err, code)
delete(rl.activeRes, cs.ID) delete(rl.activeRes, cs.ID)
if cs.req.Close || cs.req.Header.Get("Connection") == "close" { if isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true rl.closeWhenIdle = true
} }
} }
@ -1679,8 +1756,10 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
} }
func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
// TODO: do something with err? send it as a debug frame to the peer? // TODO: map err to more interesting error codes, once the
// But that's only in GOAWAY. Invent a new frame type? Is there one already? // HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock() cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code) cc.fr.WriteRSTStream(streamID, code)
cc.bw.Flush() cc.bw.Flush()
@ -1830,3 +1909,9 @@ func (s bodyWriterState) scheduleBodyWrite() {
s.timer.Reset(s.delay) s.timer.Reset(s.delay)
} }
} }
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
}
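From the caller's side (an illustrative snippet, not part of the diff), either marking below is token-matched by the helper above and, combined with the connection-pool change earlier in this diff, routes the request onto its own single-use connection.

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{}

	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	req.Close = true
	// Equivalent: req.Header.Set("Connection", "close")

	resp, err := tr.RoundTrip(req)
	if err != nil {
		fmt.Println("round trip failed:", err)
		return
	}
	resp.Body.Close()
}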


@ -150,7 +150,9 @@ func TestTransport(t *testing.T) {
func onSameConn(t *testing.T, modReq func(*http.Request)) bool { func onSameConn(t *testing.T, modReq func(*http.Request)) bool {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, r.RemoteAddr) io.WriteString(w, r.RemoteAddr)
}, optOnlyServer) }, optOnlyServer, func(c net.Conn, st http.ConnState) {
t.Logf("conn %v is now state %v", c.RemoteAddr(), st)
})
defer st.Close() defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure} tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections() defer tr.CloseIdleConnections()
@ -755,18 +757,18 @@ func testTransportReqBodyAfterResponse(t *testing.T, status int) {
if f.StreamEnded() { if f.StreamEnded() {
return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f)
} }
time.Sleep(50 * time.Millisecond) // let client send body
enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)})
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: f.StreamID,
EndHeaders: true,
EndStream: false,
BlockFragment: buf.Bytes(),
})
case *DataFrame: case *DataFrame:
dataLen := len(f.Data()) dataLen := len(f.Data())
dataRecv += int64(dataLen)
if dataLen > 0 { if dataLen > 0 {
if dataRecv == 0 {
enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)})
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: f.StreamID,
EndHeaders: true,
EndStream: false,
BlockFragment: buf.Bytes(),
})
}
if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
return err return err
} }
@ -774,6 +776,8 @@ func testTransportReqBodyAfterResponse(t *testing.T, status int) {
return err return err
} }
} }
dataRecv += int64(dataLen)
if !closed && ((status != 200 && dataRecv > 0) || if !closed && ((status != 200 && dataRecv > 0) ||
(status == 200 && dataRecv == bodySize)) { (status == 200 && dataRecv == bodySize)) {
closed = true closed = true
@ -1013,41 +1017,38 @@ func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerTy
if err != nil { if err != nil {
return err return err
} }
endStream := false
send := func(mode headerType) {
hbf := buf.Bytes()
switch mode {
case oneHeader:
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: f.Header().StreamID,
EndHeaders: true,
EndStream: endStream,
BlockFragment: hbf,
})
case splitHeader:
if len(hbf) < 2 {
panic("too small")
}
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: f.Header().StreamID,
EndHeaders: false,
EndStream: endStream,
BlockFragment: hbf[:1],
})
ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:])
default:
panic("bogus mode")
}
}
switch f := f.(type) { switch f := f.(type) {
case *WindowUpdateFrame, *SettingsFrame: case *WindowUpdateFrame, *SettingsFrame:
case *DataFrame: case *DataFrame:
// ignore for now. if !f.StreamEnded() {
case *HeadersFrame: // No need to send flow control tokens. The test request body is tiny.
endStream := false continue
send := func(mode headerType) {
hbf := buf.Bytes()
switch mode {
case oneHeader:
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: f.StreamID,
EndHeaders: true,
EndStream: endStream,
BlockFragment: hbf,
})
case splitHeader:
if len(hbf) < 2 {
panic("too small")
}
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: f.StreamID,
EndHeaders: false,
EndStream: endStream,
BlockFragment: hbf[:1],
})
ct.fr.WriteContinuation(f.StreamID, true, hbf[1:])
default:
panic("bogus mode")
}
}
if expect100Continue != noHeader {
buf.Reset()
enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
send(expect100Continue)
} }
// Response headers (1+ frames; 1 or 2 in this test, but never 0) // Response headers (1+ frames; 1 or 2 in this test, but never 0)
{ {
@ -1071,7 +1072,15 @@ func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerTy
enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"})
send(trailers) send(trailers)
} }
return nil if endStream {
return nil
}
case *HeadersFrame:
if expect100Continue != noHeader {
buf.Reset()
enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
send(expect100Continue)
}
} }
} }
} }
@ -2011,3 +2020,160 @@ func TestTransportFlowControl(t *testing.T) {
time.Sleep(1 * time.Millisecond) time.Sleep(1 * time.Millisecond)
} }
} }
// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make
// the Transport remember it and return it back to users (via
// RoundTrip or request body reads) if needed (e.g. if the server
// proceeds to close the TCP connection before the client gets its
// response)
func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) {
testTransportUsesGoAwayDebugError(t, false)
}
func TestTransportUsesGoAwayDebugError_Body(t *testing.T) {
testTransportUsesGoAwayDebugError(t, true)
}
func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) {
ct := newClientTester(t)
clientDone := make(chan struct{})
const goAwayErrCode = ErrCodeHTTP11Required // arbitrary
const goAwayDebugData = "some debug data"
ct.client = func() error {
defer close(clientDone)
req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
res, err := ct.tr.RoundTrip(req)
if failMidBody {
if err != nil {
return fmt.Errorf("unexpected client RoundTrip error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
}
want := GoAwayError{
LastStreamID: 5,
ErrCode: goAwayErrCode,
DebugData: goAwayDebugData,
}
if !reflect.DeepEqual(err, want) {
t.Errorf("RoundTrip error = %T: %#v, want %T (%#T)", err, err, want, want)
}
return nil
}
ct.server = func() error {
ct.greet()
for {
f, err := ct.fr.ReadFrame()
if err != nil {
t.Logf("ReadFrame: %v", err)
return nil
}
hf, ok := f.(*HeadersFrame)
if !ok {
continue
}
if failMidBody {
var buf bytes.Buffer
enc := hpack.NewEncoder(&buf)
enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: hf.StreamID,
EndHeaders: true,
EndStream: false,
BlockFragment: buf.Bytes(),
})
}
// Write two GOAWAY frames, to test that the Transport takes
// the interesting parts of both.
ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData))
ct.fr.WriteGoAway(5, goAwayErrCode, nil)
ct.sc.Close()
<-clientDone
return nil
}
}
ct.run()
}
// See golang.org/issue/16481
func TestTransportReturnsUnusedFlowControl(t *testing.T) {
ct := newClientTester(t)
clientClosed := make(chan bool, 1)
serverWroteBody := make(chan bool, 1)
ct.client = func() error {
req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
res, err := ct.tr.RoundTrip(req)
if err != nil {
return err
}
<-serverWroteBody
if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 {
return fmt.Errorf("body read = %v, %v; want 1, nil", n, err)
}
res.Body.Close() // leaving 4999 bytes unread
clientClosed <- true
return nil
}
ct.server = func() error {
ct.greet()
var hf *HeadersFrame
for {
f, err := ct.fr.ReadFrame()
if err != nil {
return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
}
switch f.(type) {
case *WindowUpdateFrame, *SettingsFrame:
continue
}
var ok bool
hf, ok = f.(*HeadersFrame)
if !ok {
return fmt.Errorf("Got %T; want HeadersFrame", f)
}
break
}
var buf bytes.Buffer
enc := hpack.NewEncoder(&buf)
enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: hf.StreamID,
EndHeaders: true,
EndStream: false,
BlockFragment: buf.Bytes(),
})
ct.fr.WriteData(hf.StreamID, false, make([]byte, 5000)) // without ending stream
serverWroteBody <- true
<-clientClosed
f, err := ct.fr.ReadFrame()
if err != nil {
return fmt.Errorf("ReadFrame while waiting for RSTStreamFrame: %v", err)
}
if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel {
return fmt.Errorf("Expected a WindowUpdateFrame with code cancel; got %v", summarizeFrame(f))
}
// And wait for our flow control tokens back:
f, err = ct.fr.ReadFrame()
if err != nil {
return fmt.Errorf("ReadFrame while waiting for WindowUpdateFrame: %v", err)
}
if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 {
return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f))
}
return nil
}
ct.run()
}

View file

@ -58,7 +58,7 @@ func genzsys() error {
switch { switch {
case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris":
b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv4\n"), 1) b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv4\n"), 1)
case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"): case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"):
b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv4\n"), 1) b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv4\n"), 1)
} }
b, err = format.Source(b) b, err = format.Source(b)

148
vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go generated vendored Normal file
View file

@ -0,0 +1,148 @@
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs defs_linux.go
// +build linux,ppc
package ipv4
const (
sysIP_TOS = 0x1
sysIP_TTL = 0x2
sysIP_HDRINCL = 0x3
sysIP_OPTIONS = 0x4
sysIP_ROUTER_ALERT = 0x5
sysIP_RECVOPTS = 0x6
sysIP_RETOPTS = 0x7
sysIP_PKTINFO = 0x8
sysIP_PKTOPTIONS = 0x9
sysIP_MTU_DISCOVER = 0xa
sysIP_RECVERR = 0xb
sysIP_RECVTTL = 0xc
sysIP_RECVTOS = 0xd
sysIP_MTU = 0xe
sysIP_FREEBIND = 0xf
sysIP_TRANSPARENT = 0x13
sysIP_RECVRETOPTS = 0x7
sysIP_ORIGDSTADDR = 0x14
sysIP_RECVORIGDSTADDR = 0x14
sysIP_MINTTL = 0x15
sysIP_NODEFRAG = 0x16
sysIP_UNICAST_IF = 0x32
sysIP_MULTICAST_IF = 0x20
sysIP_MULTICAST_TTL = 0x21
sysIP_MULTICAST_LOOP = 0x22
sysIP_ADD_MEMBERSHIP = 0x23
sysIP_DROP_MEMBERSHIP = 0x24
sysIP_UNBLOCK_SOURCE = 0x25
sysIP_BLOCK_SOURCE = 0x26
sysIP_ADD_SOURCE_MEMBERSHIP = 0x27
sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
sysIP_MSFILTER = 0x29
sysMCAST_JOIN_GROUP = 0x2a
sysMCAST_LEAVE_GROUP = 0x2d
sysMCAST_JOIN_SOURCE_GROUP = 0x2e
sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
sysMCAST_BLOCK_SOURCE = 0x2b
sysMCAST_UNBLOCK_SOURCE = 0x2c
sysMCAST_MSFILTER = 0x30
sysIP_MULTICAST_ALL = 0x31
sysICMP_FILTER = 0x1
sysSO_EE_ORIGIN_NONE = 0x0
sysSO_EE_ORIGIN_LOCAL = 0x1
sysSO_EE_ORIGIN_ICMP = 0x2
sysSO_EE_ORIGIN_ICMP6 = 0x3
sysSO_EE_ORIGIN_TXSTATUS = 0x4
sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
sysSOL_SOCKET = 0x1
sysSO_ATTACH_FILTER = 0x1a
sysSizeofKernelSockaddrStorage = 0x80
sysSizeofSockaddrInet = 0x10
sysSizeofInetPktinfo = 0xc
sysSizeofSockExtendedErr = 0x10
sysSizeofIPMreq = 0x8
sysSizeofIPMreqn = 0xc
sysSizeofIPMreqSource = 0xc
sysSizeofGroupReq = 0x84
sysSizeofGroupSourceReq = 0x104
sysSizeofICMPFilter = 0x4
)
type sysKernelSockaddrStorage struct {
Family uint16
X__data [126]uint8
}
type sysSockaddrInet struct {
Family uint16
Port uint16
Addr [4]byte /* in_addr */
X__pad [8]uint8
}
type sysInetPktinfo struct {
Ifindex int32
Spec_dst [4]byte /* in_addr */
Addr [4]byte /* in_addr */
}
type sysSockExtendedErr struct {
Errno uint32
Origin uint8
Type uint8
Code uint8
Pad uint8
Info uint32
Data uint32
}
type sysIPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type sysIPMreqn struct {
Multiaddr [4]byte /* in_addr */
Address [4]byte /* in_addr */
Ifindex int32
}
type sysIPMreqSource struct {
Multiaddr uint32
Interface uint32
Sourceaddr uint32
}
type sysGroupReq struct {
Interface uint32
Group sysKernelSockaddrStorage
}
type sysGroupSourceReq struct {
Interface uint32
Group sysKernelSockaddrStorage
Source sysKernelSockaddrStorage
}
type sysICMPFilter struct {
Data uint32
}
type sysSockFProg struct {
Len uint16
Pad_cgo_0 [2]byte
Filter *sysSockFilter
}
type sysSockFilter struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}

View file

@ -58,7 +58,7 @@ func genzsys() error {
switch { switch {
case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris":
b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1) b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1)
case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"): case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"):
b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1) b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1)
} }
b, err = format.Source(b) b, err = format.Source(b)

170
vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go generated vendored Normal file
View file

@ -0,0 +1,170 @@
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs defs_linux.go
// +build linux,ppc
package ipv6
const (
sysIPV6_ADDRFORM = 0x1
sysIPV6_2292PKTINFO = 0x2
sysIPV6_2292HOPOPTS = 0x3
sysIPV6_2292DSTOPTS = 0x4
sysIPV6_2292RTHDR = 0x5
sysIPV6_2292PKTOPTIONS = 0x6
sysIPV6_CHECKSUM = 0x7
sysIPV6_2292HOPLIMIT = 0x8
sysIPV6_NEXTHOP = 0x9
sysIPV6_FLOWINFO = 0xb
sysIPV6_UNICAST_HOPS = 0x10
sysIPV6_MULTICAST_IF = 0x11
sysIPV6_MULTICAST_HOPS = 0x12
sysIPV6_MULTICAST_LOOP = 0x13
sysIPV6_ADD_MEMBERSHIP = 0x14
sysIPV6_DROP_MEMBERSHIP = 0x15
sysMCAST_JOIN_GROUP = 0x2a
sysMCAST_LEAVE_GROUP = 0x2d
sysMCAST_JOIN_SOURCE_GROUP = 0x2e
sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
sysMCAST_BLOCK_SOURCE = 0x2b
sysMCAST_UNBLOCK_SOURCE = 0x2c
sysMCAST_MSFILTER = 0x30
sysIPV6_ROUTER_ALERT = 0x16
sysIPV6_MTU_DISCOVER = 0x17
sysIPV6_MTU = 0x18
sysIPV6_RECVERR = 0x19
sysIPV6_V6ONLY = 0x1a
sysIPV6_JOIN_ANYCAST = 0x1b
sysIPV6_LEAVE_ANYCAST = 0x1c
sysIPV6_FLOWLABEL_MGR = 0x20
sysIPV6_FLOWINFO_SEND = 0x21
sysIPV6_IPSEC_POLICY = 0x22
sysIPV6_XFRM_POLICY = 0x23
sysIPV6_RECVPKTINFO = 0x31
sysIPV6_PKTINFO = 0x32
sysIPV6_RECVHOPLIMIT = 0x33
sysIPV6_HOPLIMIT = 0x34
sysIPV6_RECVHOPOPTS = 0x35
sysIPV6_HOPOPTS = 0x36
sysIPV6_RTHDRDSTOPTS = 0x37
sysIPV6_RECVRTHDR = 0x38
sysIPV6_RTHDR = 0x39
sysIPV6_RECVDSTOPTS = 0x3a
sysIPV6_DSTOPTS = 0x3b
sysIPV6_RECVPATHMTU = 0x3c
sysIPV6_PATHMTU = 0x3d
sysIPV6_DONTFRAG = 0x3e
sysIPV6_RECVTCLASS = 0x42
sysIPV6_TCLASS = 0x43
sysIPV6_ADDR_PREFERENCES = 0x48
sysIPV6_PREFER_SRC_TMP = 0x1
sysIPV6_PREFER_SRC_PUBLIC = 0x2
sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
sysIPV6_PREFER_SRC_COA = 0x4
sysIPV6_PREFER_SRC_HOME = 0x400
sysIPV6_PREFER_SRC_CGA = 0x8
sysIPV6_PREFER_SRC_NONCGA = 0x800
sysIPV6_MINHOPCOUNT = 0x49
sysIPV6_ORIGDSTADDR = 0x4a
sysIPV6_RECVORIGDSTADDR = 0x4a
sysIPV6_TRANSPARENT = 0x4b
sysIPV6_UNICAST_IF = 0x4c
sysICMPV6_FILTER = 0x1
sysICMPV6_FILTER_BLOCK = 0x1
sysICMPV6_FILTER_PASS = 0x2
sysICMPV6_FILTER_BLOCKOTHERS = 0x3
sysICMPV6_FILTER_PASSONLY = 0x4
sysSOL_SOCKET = 0x1
sysSO_ATTACH_FILTER = 0x1a
sysSizeofKernelSockaddrStorage = 0x80
sysSizeofSockaddrInet6 = 0x1c
sysSizeofInet6Pktinfo = 0x14
sysSizeofIPv6Mtuinfo = 0x20
sysSizeofIPv6FlowlabelReq = 0x20
sysSizeofIPv6Mreq = 0x14
sysSizeofGroupReq = 0x84
sysSizeofGroupSourceReq = 0x104
sysSizeofICMPv6Filter = 0x20
)
type sysKernelSockaddrStorage struct {
Family uint16
X__data [126]uint8
}
type sysSockaddrInet6 struct {
Family uint16
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type sysInet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex int32
}
type sysIPv6Mtuinfo struct {
Addr sysSockaddrInet6
Mtu uint32
}
type sysIPv6FlowlabelReq struct {
Dst [16]byte /* in6_addr */
Label uint32
Action uint8
Share uint8
Flags uint16
Expires uint16
Linger uint16
X__flr_pad uint32
}
type sysIPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Ifindex int32
}
type sysGroupReq struct {
Interface uint32
Group sysKernelSockaddrStorage
}
type sysGroupSourceReq struct {
Interface uint32
Group sysKernelSockaddrStorage
Source sysKernelSockaddrStorage
}
type sysICMPv6Filter struct {
Data [8]uint32
}
type sysSockFProg struct {
Len uint16
Pad_cgo_0 [2]byte
Filter *sysSockFilter
}
type sysSockFilter struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}

View file

@ -420,7 +420,7 @@ var children=[...]uint32{
fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n",
c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType))
} else { } else {
fmt.Fprintf(w, "0x%08x,\n", c) fmt.Fprintf(w, "0x%x,\n", c)
} }
} }
fmt.Fprintf(w, "}\n\n") fmt.Fprintf(w, "}\n\n")
@ -560,7 +560,7 @@ func printNode(w io.Writer, n *node) error {
nodeTypeStr(c.nodeType), icannStr(c.icann), c.label, nodeTypeStr(c.nodeType), icannStr(c.icann), c.label,
) )
} else { } else {
fmt.Fprintf(w, "0x%08x,\n", encoding) fmt.Fprintf(w, "0x%x,\n", encoding)
} }
} }
return nil return nil

File diff suppressed because it is too large Load diff

View file

@ -215,16 +215,12 @@ var rules = [...]string{
"pro.az", "pro.az",
"biz.az", "biz.az",
"ba", "ba",
"org.ba", "com.ba",
"net.ba",
"edu.ba", "edu.ba",
"gov.ba", "gov.ba",
"mil.ba", "mil.ba",
"unsa.ba", "net.ba",
"unbi.ba", "org.ba",
"co.ba",
"com.ba",
"rs.ba",
"bb", "bb",
"biz.bb", "biz.bb",
"co.bb", "co.bb",
@ -2215,7 +2211,6 @@ var rules = [...]string{
"kamiamakusa.kumamoto.jp", "kamiamakusa.kumamoto.jp",
"kashima.kumamoto.jp", "kashima.kumamoto.jp",
"kikuchi.kumamoto.jp", "kikuchi.kumamoto.jp",
"kosa.kumamoto.jp",
"kumamoto.kumamoto.jp", "kumamoto.kumamoto.jp",
"mashiki.kumamoto.jp", "mashiki.kumamoto.jp",
"mifune.kumamoto.jp", "mifune.kumamoto.jp",
@ -7286,26 +7281,27 @@ var rules = [...]string{
"zippo", "zippo",
"zone", "zone",
"zuerich", "zuerich",
"beep.pl",
"*.compute.estate", "*.compute.estate",
"*.alces.network", "*.alces.network",
"cloudfront.net", "cloudfront.net",
"compute.amazonaws.com",
"ap-northeast-1.compute.amazonaws.com", "ap-northeast-1.compute.amazonaws.com",
"ap-northeast-2.compute.amazonaws.com", "ap-northeast-2.compute.amazonaws.com",
"ap-southeast-1.compute.amazonaws.com", "ap-southeast-1.compute.amazonaws.com",
"ap-southeast-2.compute.amazonaws.com", "ap-southeast-2.compute.amazonaws.com",
"cn-north-1.compute.amazonaws.cn",
"compute-1.amazonaws.com",
"compute.amazonaws.cn",
"compute.amazonaws.com",
"eu-central-1.compute.amazonaws.com", "eu-central-1.compute.amazonaws.com",
"eu-west-1.compute.amazonaws.com", "eu-west-1.compute.amazonaws.com",
"sa-east-1.compute.amazonaws.com", "sa-east-1.compute.amazonaws.com",
"us-east-1.amazonaws.com",
"us-gov-west-1.compute.amazonaws.com", "us-gov-west-1.compute.amazonaws.com",
"us-west-1.compute.amazonaws.com", "us-west-1.compute.amazonaws.com",
"us-west-2.compute.amazonaws.com", "us-west-2.compute.amazonaws.com",
"us-east-1.amazonaws.com",
"compute-1.amazonaws.com",
"z-1.compute-1.amazonaws.com", "z-1.compute-1.amazonaws.com",
"z-2.compute-1.amazonaws.com", "z-2.compute-1.amazonaws.com",
"compute.amazonaws.com.cn",
"cn-north-1.compute.amazonaws.com.cn",
"elasticbeanstalk.com", "elasticbeanstalk.com",
"elb.amazonaws.com", "elb.amazonaws.com",
"s3.amazonaws.com", "s3.amazonaws.com",
@ -7326,8 +7322,14 @@ var rules = [...]string{
"s3.cn-north-1.amazonaws.com.cn", "s3.cn-north-1.amazonaws.com.cn",
"s3.eu-central-1.amazonaws.com", "s3.eu-central-1.amazonaws.com",
"on-aptible.com", "on-aptible.com",
"potager.org",
"poivron.org",
"sweetpepper.org",
"pimienta.org",
"myfritz.net", "myfritz.net",
"betainabox.com", "betainabox.com",
"boxfuse.io",
"mycd.eu",
"ae.org", "ae.org",
"ar.com", "ar.com",
"br.com", "br.com",
@ -7362,7 +7364,9 @@ var rules = [...]string{
"us.org", "us.org",
"co.com", "co.com",
"c.la", "c.la",
"certmgr.org",
"xenapponazure.com", "xenapponazure.com",
"virtueeldomein.nl",
"cloudcontrolled.com", "cloudcontrolled.com",
"cloudcontrolapp.com", "cloudcontrolapp.com",
"co.ca", "co.ca",
@ -7375,6 +7379,8 @@ var rules = [...]string{
"co.nl", "co.nl",
"co.no", "co.no",
"*.platform.sh", "*.platform.sh",
"realm.cz",
"*.cryptonomic.net",
"cupcake.is", "cupcake.is",
"cyon.link", "cyon.link",
"cyon.site", "cyon.site",
@ -7728,12 +7734,17 @@ var rules = [...]string{
"tr.eu.org", "tr.eu.org",
"uk.eu.org", "uk.eu.org",
"us.eu.org", "us.eu.org",
"eu-1.evennode.com",
"eu-2.evennode.com",
"us-1.evennode.com",
"us-2.evennode.com",
"apps.fbsbx.com", "apps.fbsbx.com",
"a.ssl.fastly.net", "a.ssl.fastly.net",
"b.ssl.fastly.net", "b.ssl.fastly.net",
"global.ssl.fastly.net", "global.ssl.fastly.net",
"a.prod.fastly.net", "a.prod.fastly.net",
"global.prod.fastly.net", "global.prod.fastly.net",
"fhapp.xyz",
"firebaseapp.com", "firebaseapp.com",
"flynnhub.com", "flynnhub.com",
"freebox-os.com", "freebox-os.com",
@ -7836,11 +7847,16 @@ var rules = [...]string{
"withgoogle.com", "withgoogle.com",
"withyoutube.com", "withyoutube.com",
"hashbang.sh", "hashbang.sh",
"hasura-app.io",
"hepforge.org",
"herokuapp.com", "herokuapp.com",
"herokussl.com", "herokussl.com",
"iki.fi", "iki.fi",
"biz.at", "biz.at",
"info.at", "info.at",
"*.magentosite.cloud",
"meteorapp.com",
"eu.meteorapp.com",
"co.pl", "co.pl",
"azurewebsites.net", "azurewebsites.net",
"azure-mobile.net", "azure-mobile.net",
@ -7962,9 +7978,12 @@ var rules = [...]string{
"rhcloud.com", "rhcloud.com",
"hzc.io", "hzc.io",
"sandcats.io", "sandcats.io",
"logoip.de",
"logoip.com",
"biz.ua", "biz.ua",
"co.ua", "co.ua",
"pp.ua", "pp.ua",
"myshopblocks.com",
"sinaapp.com", "sinaapp.com",
"vipsinaapp.com", "vipsinaapp.com",
"1kapp.com", "1kapp.com",
@ -7992,6 +8011,7 @@ var rules = [...]string{
"sopot.pl", "sopot.pl",
"bloxcms.com", "bloxcms.com",
"townnews-staging.com", "townnews-staging.com",
"tuxfamily.org",
"hk.com", "hk.com",
"hk.org", "hk.org",
"ltd.hk", "ltd.hk",
@ -9756,16 +9776,12 @@ var nodeLabels = [...]string{
"pp", "pp",
"pro", "pro",
"blogspot", "blogspot",
"co",
"com", "com",
"edu", "edu",
"gov", "gov",
"mil", "mil",
"net", "net",
"org", "org",
"rs",
"unbi",
"unsa",
"biz", "biz",
"co", "co",
"com", "com",
@ -9997,13 +10013,13 @@ var nodeLabels = [...]string{
"gob", "gob",
"gov", "gov",
"mil", "mil",
"magentosite",
"co", "co",
"com", "com",
"gov", "gov",
"net", "net",
"ac", "ac",
"ah", "ah",
"amazonaws",
"bj", "bj",
"com", "com",
"cq", "cq",
@ -10046,11 +10062,11 @@ var nodeLabels = [...]string{
"xz", "xz",
"yn", "yn",
"zj", "zj",
"compute",
"cn-north-1",
"amazonaws", "amazonaws",
"cn-north-1", "cn-north-1",
"compute",
"s3", "s3",
"cn-north-1",
"arts", "arts",
"com", "com",
"edu", "edu",
@ -10122,6 +10138,7 @@ var nodeLabels = [...]string{
"est-le-patron", "est-le-patron",
"est-mon-blogueur", "est-mon-blogueur",
"eu", "eu",
"evennode",
"familyds", "familyds",
"fbsbx", "fbsbx",
"firebaseapp", "firebaseapp",
@ -10259,10 +10276,13 @@ var nodeLabels = [...]string{
"kr", "kr",
"likes-pie", "likes-pie",
"likescandy", "likescandy",
"logoip",
"meteorapp",
"mex", "mex",
"myactivedirectory", "myactivedirectory",
"mydrobo", "mydrobo",
"mysecuritycamera", "mysecuritycamera",
"myshopblocks",
"myvnc", "myvnc",
"neat-url", "neat-url",
"net-freaks", "net-freaks",
@ -10361,10 +10381,15 @@ var nodeLabels = [...]string{
"s3", "s3",
"alpha", "alpha",
"beta", "beta",
"eu-1",
"eu-2",
"us-1",
"us-2",
"apps", "apps",
"api", "api",
"ext", "ext",
"gist", "gist",
"eu",
"xen", "xen",
"ac", "ac",
"co", "co",
@ -10403,6 +10428,7 @@ var nodeLabels = [...]string{
"blogspot", "blogspot",
"co", "co",
"e4", "e4",
"realm",
"blogspot", "blogspot",
"com", "com",
"dnshome", "dnshome",
@ -10412,6 +10438,7 @@ var nodeLabels = [...]string{
"istmein", "istmein",
"lebtimnetz", "lebtimnetz",
"leitungsen", "leitungsen",
"logoip",
"traeumtgerade", "traeumtgerade",
"biz", "biz",
"blogspot", "blogspot",
@ -10490,6 +10517,7 @@ var nodeLabels = [...]string{
"name", "name",
"net", "net",
"org", "org",
"mycd",
"aland", "aland",
"blogspot", "blogspot",
"dy", "dy",
@ -10724,9 +10752,11 @@ var nodeLabels = [...]string{
"selfip", "selfip",
"webhop", "webhop",
"eu", "eu",
"boxfuse",
"com", "com",
"dedyn", "dedyn",
"github", "github",
"hasura-app",
"hzc", "hzc",
"ngrok", "ngrok",
"nid", "nid",
@ -12057,7 +12087,6 @@ var nodeLabels = [...]string{
"kamiamakusa", "kamiamakusa",
"kashima", "kashima",
"kikuchi", "kikuchi",
"kosa",
"kumamoto", "kumamoto",
"mashiki", "mashiki",
"mifune", "mifune",
@ -13786,6 +13815,7 @@ var nodeLabels = [...]string{
"cloudapp", "cloudapp",
"cloudfront", "cloudfront",
"cloudfunctions", "cloudfunctions",
"cryptonomic",
"ddns", "ddns",
"dnsalias", "dnsalias",
"dnsdojo", "dnsdojo",
@ -13892,6 +13922,7 @@ var nodeLabels = [...]string{
"blogspot", "blogspot",
"bv", "bv",
"co", "co",
"virtueeldomein",
"aa", "aa",
"aarborte", "aarborte",
"aejrie", "aejrie",
@ -14702,6 +14733,7 @@ var nodeLabels = [...]string{
"cable-modem", "cable-modem",
"cdn77", "cdn77",
"cdn77-secure", "cdn77-secure",
"certmgr",
"collegefan", "collegefan",
"couchpotatofries", "couchpotatofries",
"dnsalias", "dnsalias",
@ -14721,6 +14753,7 @@ var nodeLabels = [...]string{
"from-me", "from-me",
"game-host", "game-host",
"gotdns", "gotdns",
"hepforge",
"hk", "hk",
"hobby-site", "hobby-site",
"homedns", "homedns",
@ -14753,7 +14786,10 @@ var nodeLabels = [...]string{
"mysecuritycamera", "mysecuritycamera",
"nflfan", "nflfan",
"no-ip", "no-ip",
"pimienta",
"podzone", "podzone",
"poivron",
"potager",
"read-books", "read-books",
"readmyblog", "readmyblog",
"selfip", "selfip",
@ -14762,7 +14798,9 @@ var nodeLabels = [...]string{
"serveftp", "serveftp",
"servegame", "servegame",
"stuff-4-sale", "stuff-4-sale",
"sweetpepper",
"tunk", "tunk",
"tuxfamily",
"ufcfan", "ufcfan",
"us", "us",
"webhop", "webhop",
@ -14882,6 +14920,7 @@ var nodeLabels = [...]string{
"auto", "auto",
"babia-gora", "babia-gora",
"bedzin", "bedzin",
"beep",
"beskidy", "beskidy",
"bialowieza", "bialowieza",
"bialystok", "bialystok",
@ -16003,6 +16042,7 @@ var nodeLabels = [...]string{
"xn--d1at", "xn--d1at",
"xn--o1ac", "xn--o1ac",
"xn--o1ach", "xn--o1ach",
"fhapp",
"ac", "ac",
"agric", "agric",
"alt", "alt",

View file

@ -234,7 +234,11 @@ func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) (
return nil, err return nil, err
} }
as[i] = a as[i] = a
b = b[roundup(int(b[0])):] l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
case sysAF_INET, sysAF_INET6: case sysAF_INET, sysAF_INET6:
af = int(b[1]) af = int(b[1])
a, err := parseInetAddr(af, b) a, err := parseInetAddr(af, b)
@ -242,7 +246,11 @@ func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) (
return nil, err return nil, err
} }
as[i] = a as[i] = a
b = b[roundup(int(b[0])):] l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
default: default:
l, a, err := fn(af, b) l, a, err := fn(af, b)
if err != nil { if err != nil {
@ -262,7 +270,11 @@ func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) (
return nil, err return nil, err
} }
as[i] = a as[i] = a
b = b[roundup(int(b[0])):] l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
} }
} }
return as[:], nil return as[:], nil

View file

@ -13,12 +13,12 @@ func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, erro
extOff = int(nativeEndian.Uint16(b[18:20])) extOff = int(nativeEndian.Uint16(b[18:20]))
bodyOff = int(nativeEndian.Uint16(b[16:18])) bodyOff = int(nativeEndian.Uint16(b[16:18]))
} else { } else {
if len(b) < w.bodyOff {
return nil, errMessageTooShort
}
extOff = w.extOff extOff = w.extOff
bodyOff = w.bodyOff bodyOff = w.bodyOff
} }
if len(b) < extOff || len(b) < bodyOff {
return nil, errInvalidMessage
}
l := int(nativeEndian.Uint16(b[:2])) l := int(nativeEndian.Uint16(b[:2]))
if len(b) < l { if len(b) < l {
return nil, errInvalidMessage return nil, errInvalidMessage
@ -53,11 +53,11 @@ func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message,
} }
bodyOff = int(nativeEndian.Uint16(b[16:18])) bodyOff = int(nativeEndian.Uint16(b[16:18]))
} else { } else {
if len(b) < w.bodyOff {
return nil, errMessageTooShort
}
bodyOff = w.bodyOff bodyOff = w.bodyOff
} }
if len(b) < bodyOff {
return nil, errInvalidMessage
}
l := int(nativeEndian.Uint16(b[:2])) l := int(nativeEndian.Uint16(b[:2]))
if len(b) < l { if len(b) < l {
return nil, errInvalidMessage return nil, errInvalidMessage

View file

@ -24,7 +24,11 @@ func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) {
Addrs: make([]Addr, sysRTAX_MAX), Addrs: make([]Addr, sysRTAX_MAX),
raw: b[:l], raw: b[:l],
} }
a, err := parseLinkAddr(b[int(nativeEndian.Uint16(b[4:6])):]) ll := int(nativeEndian.Uint16(b[4:6]))
if len(b) < ll {
return nil, errInvalidMessage
}
a, err := parseLinkAddr(b[ll:])
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -42,6 +46,9 @@ func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, erro
return nil, errInvalidMessage return nil, errInvalidMessage
} }
bodyOff := int(nativeEndian.Uint16(b[4:6])) bodyOff := int(nativeEndian.Uint16(b[4:6]))
if len(b) < bodyOff {
return nil, errInvalidMessage
}
m := &InterfaceAddrMessage{ m := &InterfaceAddrMessage{
Version: int(b[2]), Version: int(b[2]),
Type: int(b[3]), Type: int(b[3]),

View file

@ -42,6 +42,12 @@ func ParseRIB(typ RIBType, b []byte) ([]Message, error) {
for len(b) > 4 { for len(b) > 4 {
nmsgs++ nmsgs++
l := int(nativeEndian.Uint16(b[:2])) l := int(nativeEndian.Uint16(b[:2]))
if l == 0 {
return nil, errInvalidMessage
}
if len(b) < l {
return nil, errMessageTooShort
}
if b[2] != sysRTM_VERSION { if b[2] != sysRTM_VERSION {
b = b[l:] b = b[l:]
continue continue

View file

@ -93,3 +93,26 @@ func TestMonitorAndParseRIB(t *testing.T) {
time.Sleep(200 * time.Millisecond) time.Sleep(200 * time.Millisecond)
} }
} }
func TestParseRIBWithFuzz(t *testing.T) {
for _, fuzz := range []string{
"0\x00\x05\x050000000000000000" +
"00000000000000000000" +
"00000000000000000000" +
"00000000000000000000" +
"0000000000000\x02000000" +
"00000000",
"\x02\x00\x05\f0000000000000000" +
"0\x0200000000000000",
"\x02\x00\x05\x100000000000000\x1200" +
"0\x00\xff\x00",
"\x02\x00\x05\f0000000000000000" +
"0\x12000\x00\x02\x0000",
"\x00\x00\x00\x01\x00",
"00000",
} {
for typ := RIBType(0); typ < 256; typ++ {
ParseRIB(typ, []byte(fuzz))
}
}
}

View file

@ -19,7 +19,11 @@ func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) {
Index: int(nativeEndian.Uint16(b[6:8])), Index: int(nativeEndian.Uint16(b[6:8])),
raw: b[:l], raw: b[:l],
} }
as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[int(nativeEndian.Uint16(b[4:6])):]) ll := int(nativeEndian.Uint16(b[4:6]))
if len(b) < ll {
return nil, errInvalidMessage
}
as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:])
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -786,6 +786,9 @@ func TestMemFile(t *testing.T) {
// memFile doesn't allocate a new buffer for each of those N times. Otherwise, // memFile doesn't allocate a new buffer for each of those N times. Otherwise,
// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. // calling io.Copy(aMemFile, src) is likely to have quadratic complexity.
func TestMemFileWriteAllocs(t *testing.T) { func TestMemFileWriteAllocs(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("gccgo allocates here")
}
fs := NewMemFS() fs := NewMemFS()
f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil { if err != nil {

17
vendor/google.golang.org/grpc/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,17 @@
language: go
go:
- 1.5.3
- 1.6
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
install:
- mkdir -p "$GOPATH/src/google.golang.org"
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc"
script:
- make test testrace

46
vendor/google.golang.org/grpc/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,46 @@
# How to contribute
We definitely welcome patches and contributions to grpc! Here are some guidelines
and information about how to do so.
## Sending patches
### Getting started
1. Check out the code:
$ go get google.golang.org/grpc
$ cd $GOPATH/src/google.golang.org/grpc
1. Create a fork of the grpc-go repository.
1. Add your fork as a remote:
$ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
1. Make changes, commit them.
1. Run the test suite:
$ make test
1. Push your changes to your fork:
$ git push fork ...
1. Open a pull request.
## Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
## Filing Issues
When filing an issue, make sure to answer these five questions:
1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?
### Contributing code
Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.

View file

@ -0,0 +1,41 @@
# Authentication
As outlined in the [gRPC authentication guide](http://www.grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between a client and a server. We'll present some code samples here demonstrating how to provide TLS-based encryption and identity assertions, as well as how to pass OAuth2 tokens to services that support them.
# Enabling TLS on a gRPC client
```Go
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
```
# Enabling TLS on a gRPC server
```Go
creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
if err != nil {
log.Fatalf("Failed to generate credentials %v", err)
}
lis, err := net.Listen("tcp", ":0")
server := grpc.NewServer(grpc.Creds(creds))
...
server.Serve(lis)
```
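A client connecting to a server configured this way can load the matching certificate from a file. This is a minimal sketch, not part of the original guide; `certFile` and `serverAddr` are placeholders.
```Go
creds, err := credentials.NewClientTLSFromFile(certFile, "")
if err != nil {
log.Fatalf("Failed to load credentials %v", err)
}
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
```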
# Authenticating with Google
## Google Compute Engine (GCE)
```Go
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(oauth.NewComputeEngine()))
```
## JWT
```Go
jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)
if err != nil {
log.Fatalf("Failed to create JWT credentials: %v", err)
}
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(jwtCreds))
```
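For completeness, here is a sketch (not from the original guide) of attaching a static OAuth2 access token to every RPC via the `oauth` credentials package; the token value is a placeholder, and the snippet assumes the `golang.org/x/oauth2` and `google.golang.org/grpc/credentials/oauth` imports.
```Go
perRPC := oauth.NewOauthAccess(&oauth2.Token{AccessToken: "my-access-token"}) // placeholder token
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(perRPC))
```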

View file

@ -0,0 +1,201 @@
# Metadata
gRPC supports sending metadata between client and server.
This doc shows how to send and receive metadata in gRPC-go.
## Background
There are four kinds of service method:
- [Unary RPC](http://www.grpc.io/docs/guides/concepts.html#unary-rpc)
- [Server streaming RPC](http://www.grpc.io/docs/guides/concepts.html#server-streaming-rpc)
- [Client streaming RPC](http://www.grpc.io/docs/guides/concepts.html#client-streaming-rpc)
- [Bidirectional streaming RPC](http://www.grpc.io/docs/guides/concepts.html#bidirectional-streaming-rpc)
Also relevant is the concept of [metadata](http://www.grpc.io/docs/guides/concepts.html#metadata).
## Constructing metadata
Metadata can be created using the [metadata](https://godoc.org/google.golang.org/grpc/metadata) package.
The type MD is a map from string to a list of strings:
```go
type MD map[string][]string
```
Metadata can be read like a normal map.
Note that the value type of this map is `[]string`,
so that users can attach multiple values using a single key.
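As a small illustration (not in the original doc), an `MD` value such as the ones constructed below can be indexed or iterated like any Go map:
```go
// Sketch: md is assumed to be a metadata.MD built as shown in the next sections.
for key, values := range md {
fmt.Println(key, values)
}
fmt.Println(md["key1"]) // e.g. []string{"val1", "val1-2"}
```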
### Creating a new metadata
Metadata can be created from a `map[string]string` using the function `New`:
```go
md := metadata.New(map[string]string{"key1": "val1", "key2": "val2"})
```
Another way is to use `Pairs`.
Values with the same key will be merged into a list:
```go
md := metadata.Pairs(
"key1", "val1",
"key1", "val1-2", // "key1" will have map value []string{"val1", "val1-2"}
"key2", "val2",
)
```
__Note:__ all the keys will be automatically converted to lowercase,
so "key1" and "kEy1" will be the same key and their values will be merged into the same list.
This happens for both `New` and `Pairs`.
### Storing binary data in metadata
In metadata, keys are always strings, but values can be strings or binary data.
To store a binary value in metadata, add a "-bin" suffix to the key.
Values whose keys carry the "-bin" suffix will be encoded when creating the metadata:
```go
md := metadata.Pairs(
"key", "string value",
"key-bin", string([]byte{96, 102}), // this binary data will be encoded (base64) before sending
// and will be decoded after being transferred.
)
```
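On the receiving side the transport decodes the base64 value, so the raw bytes can be read back directly. A sketch, assuming `md` was retrieved with `metadata.FromContext`:
```go
if vals, ok := md["key-bin"]; ok {
raw := []byte(vals[0]) // the original bytes, e.g. []byte{96, 102}
_ = raw
}
```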
## Retrieving metadata from context
Metadata can be retrieved from context using `FromContext`:
```go
func (s *server) SomeRPC(ctx context.Context, in *pb.SomeRequest) (*pb.SomeResponse, err) {
md, ok := metadata.FromContext(ctx)
// do something with metadata
}
```
## Sending and receiving metadata - client side
[//]: # "TODO: uncomment next line after example source added"
[//]: # "Real metadata sending and receiving examples are available [here](TODO:example_dir)."
### Sending metadata
To send metadata to server, the client can wrap the metadata into a context using `NewContext`, and make the RPC with this context:
```go
md := metadata.Pairs("key", "val")
// create a new context with this metadata
ctx := metadata.NewContext(context.Background(), md)
// make unary RPC
response, err := client.SomeRPC(ctx, someRequest)
// or make streaming RPC
stream, err := client.SomeStreamingRPC(ctx)
```
### Receiving metadata
Metadata that a client can receive includes header and trailer.
#### Unary call
Header and trailer sent along with a unary call can be retrieved using function [Header](https://godoc.org/google.golang.org/grpc#Header) and [Trailer](https://godoc.org/google.golang.org/grpc#Trailer) in [CallOption](https://godoc.org/google.golang.org/grpc#CallOption):
```go
var header, trailer metadata.MD // variable to store header and trailer
r, err := client.SomeRPC(
ctx,
someRequest,
grpc.Header(&header), // will retrieve header
grpc.Trailer(&trailer), // will retrieve trailer
)
// do something with header and trailer
```
#### Streaming call
For streaming calls including:
- Server streaming RPC
- Client streaming RPC
- Bidirectional streaming RPC
Header and trailer can be retrieved from the returned stream using function `Header` and `Trailer` in interface [ClientStream](https://godoc.org/google.golang.org/grpc#ClientStream):
```go
stream, err := client.SomeStreamingRPC(ctx)
// retrieve header
header, err := stream.Header()
// retrieve trailer
trailer := stream.Trailer()
```
## Sending and receiving metadata - server side
[//]: # "TODO: uncomment next line after example source added"
[//]: # "Real metadata sending and receiving examples are available [here](TODO:example_dir)."
### Receiving metadata
To read metadata sent by the client, the server needs to retrieve it from the RPC context.
If it is a unary call, the RPC handler's context can be used.
For streaming calls, the server needs to get the context from the stream.
#### Unary call
```go
func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) {
md, ok := metadata.FromContext(ctx)
// do something with metadata
}
```
#### Streaming call
```go
func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error {
md, ok := metadata.FromContext(stream.Context()) // get context from stream
// do something with metadata
}
```
### Sending metadata
#### Unary call
To send the header and trailer to the client in a unary call, the server can call the [SendHeader](https://godoc.org/google.golang.org/grpc#SendHeader) and [SetTrailer](https://godoc.org/google.golang.org/grpc#SetTrailer) functions in the [grpc](https://godoc.org/google.golang.org/grpc) package.
These two functions take a context as the first parameter.
It should be the RPC handler's context or one derived from it:
```go
func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) {
// create and send header
header := metadata.Pairs("header-key", "val")
grpc.SendHeader(ctx, header)
// create and set trailer
trailer := metadata.Pairs("trailer-key", "val")
grpc.SetTrailer(ctx, trailer)
}
```
#### Streaming call
For streaming calls, the header and trailer can be sent using the `SendHeader` and `SetTrailer` methods of the [ServerStream](https://godoc.org/google.golang.org/grpc#ServerStream) interface:
```go
func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error {
// create and send header
header := metadata.Pairs("header-key", "val")
stream.SendHeader(header)
// create and set trailer
trailer := metadata.Pairs("trailer-key", "val")
stream.SetTrailer(trailer)
}
```
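Putting the pieces together, a server-streaming handler might send the header first, stream its responses, and set the trailer before returning. This is a sketch only; the `pb` message types are placeholders, and the trailer is transmitted when the handler returns.
```go
func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error {
if err := stream.SendHeader(metadata.Pairs("header-key", "val")); err != nil {
return err
}
for i := 0; i < 3; i++ {
if err := stream.Send(&pb.SomeResponse{}); err != nil { // placeholder response type
return err
}
}
stream.SetTrailer(metadata.Pairs("trailer-key", "val"))
return nil
}
```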

28
vendor/google.golang.org/grpc/LICENSE generated vendored Normal file
View file

@ -0,0 +1,28 @@
Copyright 2014, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

52
vendor/google.golang.org/grpc/Makefile generated vendored Normal file
View file

@ -0,0 +1,52 @@
all: test testrace
deps:
go get -d -v google.golang.org/grpc/...
updatedeps:
go get -d -v -u -f google.golang.org/grpc/...
testdeps:
go get -d -v -t google.golang.org/grpc/...
updatetestdeps:
go get -d -v -t -u -f google.golang.org/grpc/...
build: deps
go build google.golang.org/grpc/...
proto:
@ if ! which protoc > /dev/null; then \
echo "error: protoc not installed" >&2; \
exit 1; \
fi
go get -u -v github.com/golang/protobuf/protoc-gen-go
# use $$dir as the root for all proto files in the same directory
for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \
protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \
done
test: testdeps
go test -v -cpu 1,4 google.golang.org/grpc/...
testrace: testdeps
go test -v -race -cpu 1,4 google.golang.org/grpc/...
clean:
go clean -i google.golang.org/grpc/...
coverage: testdeps
./coverage.sh --coveralls
.PHONY: \
all \
deps \
updatedeps \
testdeps \
updatetestdeps \
build \
proto \
test \
testrace \
clean \
coverage

22
vendor/google.golang.org/grpc/PATENTS generated vendored Normal file
View file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the gRPC project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of gRPC, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of gRPC. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of gRPC or any code incorporated within this
implementation of gRPC constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of gRPC
shall terminate as of the date such litigation is filed.

32
vendor/google.golang.org/grpc/README.md generated vendored Normal file
View file

@ -0,0 +1,32 @@
# gRPC-Go
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
Installation
------------
To install this package, you need to install Go and set up your Go workspace on your computer. The simplest way to install the library is to run:
```
$ go get google.golang.org/grpc
```
Prerequisites
-------------
This requires Go 1.5 or later.
Constraints
-----------
The grpc package should only depend on standard Go packages, with a small number of exceptions. If your contribution introduces new dependencies that are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you will need to discuss them with the gRPC-Go authors and consultants.
Documentation
-------------
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
Status
------
Beta release

80
vendor/google.golang.org/grpc/backoff.go generated vendored Normal file
View file

@ -0,0 +1,80 @@
package grpc
import (
"math/rand"
"time"
)
// DefaultBackoffConfig uses values specified for backoff in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
var (
DefaultBackoffConfig = BackoffConfig{
MaxDelay: 120 * time.Second,
baseDelay: 1.0 * time.Second,
factor: 1.6,
jitter: 0.2,
}
)
// backoffStrategy defines the methodology for backing off after a grpc
// connection failure.
//
// This is unexported until the gRPC project decides whether or not to allow
// alternative backoff strategies. Once a decision is made, this type and its
// method may be exported.
type backoffStrategy interface {
// backoff returns the amount of time to wait before the next retry given
// the number of consecutive failures.
backoff(retries int) time.Duration
}
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
type BackoffConfig struct {
// MaxDelay is the upper bound of backoff delay.
MaxDelay time.Duration
// TODO(stevvooe): The following fields are not exported, as allowing
// changes would violate the current gRPC specification for backoff. If
// gRPC decides to allow more interesting backoff strategies, these fields
// may be opened up in the future.
// baseDelay is the amount of time to wait before retrying after the first
// failure.
baseDelay time.Duration
// factor is applied to the backoff after each retry.
factor float64
// jitter provides a range to randomize backoff delays.
jitter float64
}
func setDefaults(bc *BackoffConfig) {
md := bc.MaxDelay
*bc = DefaultBackoffConfig
if md > 0 {
bc.MaxDelay = md
}
}
func (bc BackoffConfig) backoff(retries int) (t time.Duration) {
if retries == 0 {
return bc.baseDelay
}
backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
for backoff < max && retries > 0 {
backoff *= bc.factor
retries--
}
if backoff > max {
backoff = max
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
if backoff < 0 {
return 0
}
return time.Duration(backoff)
}

11
vendor/google.golang.org/grpc/backoff_test.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
package grpc
import "testing"
func TestBackoffConfigDefaults(t *testing.T) {
b := BackoffConfig{}
setDefaults(&b)
if b != DefaultBackoffConfig {
t.Fatalf("expected BackoffConfig to pickup default parameters: %v != %v", b, DefaultBackoffConfig)
}
}

385
vendor/google.golang.org/grpc/balancer.go generated vendored Normal file
View file

@ -0,0 +1,385 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"fmt"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/naming"
)
// Address represents a server the client connects to.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
Metadata interface{}
}
// BalancerGetOptions configures a Get call.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type BalancerGetOptions struct {
// BlockingWait specifies whether Get should block when there is no
// connected address.
BlockingWait bool
}
// Balancer chooses network addresses for RPCs.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Balancer interface {
// Start does the initialization work to bootstrap a Balancer. For example,
// this function may start the name resolution and watch the updates. It will
// be called when dialing.
Start(target string) error
// Up informs the Balancer that gRPC has a connection to the server at
// addr. It returns down which is called once the connection to addr gets
// lost or closed.
// TODO: It is not clear how to construct and take advantage of the meaningful error
// parameter for down. Realistic demands are needed to guide this.
Up(addr Address) (down func(error))
// Get gets the address of a server for the RPC corresponding to ctx.
// i) If it returns a connected address, gRPC internals issues the RPC on the
// connection to this address;
// ii) If it returns an address on which the connection is under construction
// (initiated by Notify(...)) but not connected, gRPC internals
// * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
// Shutdown state;
// or
// * issues RPC on the connection otherwise.
// iii) If it returns an address on which the connection does not exist, gRPC
// internals treats it as an error and will fail the corresponding RPC.
//
// Therefore, the following is the recommended rule when writing a custom Balancer.
// If opts.BlockingWait is true, it should return a connected address or
// block if there is no connected address. It should respect the timeout or
// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
// RPCs), it should return an address it has notified via Notify(...) immediately
// instead of blocking.
//
// The function returns put which is called once the rpc has completed or failed.
// put can collect and report RPC stats to a remote load balancer.
//
// This function should only return the errors Balancer cannot recover by itself.
// gRPC internals will fail the RPC if an error is returned.
Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
// Notify returns a channel that is used by gRPC internals to watch the addresses
// gRPC needs to connect. The addresses might be from a name resolver or remote
// load balancer. gRPC internals will compare it with the existing connected
// addresses. If the address Balancer notified is not in the existing connected
// addresses, gRPC starts to connect the address. If an address in the existing
// connected addresses is not in the notification list, the corresponding connection
// is shutdown gracefully. Otherwise, there are no operations to take. Note that
// the Address slice must be the full list of the Addresses which should be connected.
// It is NOT delta.
Notify() <-chan []Address
// Close shuts down the balancer.
Close() error
}
// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
// call of Balancer.
type downErr struct {
timeout bool
temporary bool
desc string
}
func (e downErr) Error() string { return e.desc }
func (e downErr) Timeout() bool { return e.timeout }
func (e downErr) Temporary() bool { return e.temporary }
func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
return downErr{
timeout: timeout,
temporary: temporary,
desc: fmt.Sprintf(format, a...),
}
}
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
// the name resolution updates and updates the addresses available correspondingly.
func RoundRobin(r naming.Resolver) Balancer {
return &roundRobin{r: r}
}
type addrInfo struct {
addr Address
connected bool
}
type roundRobin struct {
r naming.Resolver
w naming.Watcher
addrs []*addrInfo // all the addresses the client should potentially connect to
mu sync.Mutex
addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
next int // index of the next address to return for Get()
waitCh chan struct{} // the channel to block when there is no connected address available
done bool // The Balancer is closed.
}
func (rr *roundRobin) watchAddrUpdates() error {
updates, err := rr.w.Next()
if err != nil {
grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
return err
}
rr.mu.Lock()
defer rr.mu.Unlock()
for _, update := range updates {
addr := Address{
Addr: update.Addr,
Metadata: update.Metadata,
}
switch update.Op {
case naming.Add:
var exist bool
for _, v := range rr.addrs {
if addr == v.addr {
exist = true
grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
break
}
}
if exist {
continue
}
rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
case naming.Delete:
for i, v := range rr.addrs {
if addr == v.addr {
copy(rr.addrs[i:], rr.addrs[i+1:])
rr.addrs = rr.addrs[:len(rr.addrs)-1]
break
}
}
default:
grpclog.Println("Unknown update.Op ", update.Op)
}
}
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
open := make([]Address, len(rr.addrs))
for i, v := range rr.addrs {
open[i] = v.addr
}
if rr.done {
return ErrClientConnClosing
}
rr.addrCh <- open
return nil
}
func (rr *roundRobin) Start(target string) error {
if rr.r == nil {
// If there is no name resolver installed, there is no need to
// do name resolution. In this case, target is added into rr.addrs
// as the only address available and rr.addrCh stays nil.
rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
return nil
}
w, err := rr.r.Resolve(target)
if err != nil {
return err
}
rr.w = w
rr.addrCh = make(chan []Address)
go func() {
for {
if err := rr.watchAddrUpdates(); err != nil {
return
}
}
}()
return nil
}
// Up sets the connected state of addr and sends notification if there are pending
// Get() calls.
func (rr *roundRobin) Up(addr Address) func(error) {
rr.mu.Lock()
defer rr.mu.Unlock()
var cnt int
for _, a := range rr.addrs {
if a.addr == addr {
if a.connected {
return nil
}
a.connected = true
}
if a.connected {
cnt++
}
}
// addr is the only one that is connected. Notify the Get() callers who are blocking.
if cnt == 1 && rr.waitCh != nil {
close(rr.waitCh)
rr.waitCh = nil
}
return func(err error) {
rr.down(addr, err)
}
}
// down unsets the connected state of addr.
func (rr *roundRobin) down(addr Address, err error) {
rr.mu.Lock()
defer rr.mu.Unlock()
for _, a := range rr.addrs {
if addr == a.addr {
a.connected = false
break
}
}
}
// Get returns the next addr in the rotation.
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
var ch chan struct{}
rr.mu.Lock()
if rr.done {
rr.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(rr.addrs) > 0 {
if rr.next >= len(rr.addrs) {
rr.next = 0
}
next := rr.next
for {
a := rr.addrs[next]
next = (next + 1) % len(rr.addrs)
if a.connected {
addr = a.addr
rr.next = next
rr.mu.Unlock()
return
}
if next == rr.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
if !opts.BlockingWait {
if len(rr.addrs) == 0 {
rr.mu.Unlock()
err = fmt.Errorf("there is no address available")
return
}
// Returns the next addr on rr.addrs for failfast RPCs.
addr = rr.addrs[rr.next].addr
rr.next++
rr.mu.Unlock()
return
}
// Wait on rr.waitCh for non-failfast RPCs.
if rr.waitCh == nil {
ch = make(chan struct{})
rr.waitCh = ch
} else {
ch = rr.waitCh
}
rr.mu.Unlock()
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
case <-ch:
rr.mu.Lock()
if rr.done {
rr.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(rr.addrs) > 0 {
if rr.next >= len(rr.addrs) {
rr.next = 0
}
next := rr.next
for {
a := rr.addrs[next]
next = (next + 1) % len(rr.addrs)
if a.connected {
addr = a.addr
rr.next = next
rr.mu.Unlock()
return
}
if next == rr.next {
// Iterated over all the possible addresses but none is connected.
break
}
}
}
// The newly added addr got removed by Down() again.
if rr.waitCh == nil {
ch = make(chan struct{})
rr.waitCh = ch
} else {
ch = rr.waitCh
}
rr.mu.Unlock()
}
}
}
func (rr *roundRobin) Notify() <-chan []Address {
return rr.addrCh
}
func (rr *roundRobin) Close() error {
rr.mu.Lock()
defer rr.mu.Unlock()
rr.done = true
if rr.w != nil {
rr.w.Close()
}
if rr.waitCh != nil {
close(rr.waitCh)
rr.waitCh = nil
}
if rr.addrCh != nil {
close(rr.addrCh)
}
return nil
}
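// A minimal usage sketch of this balancer from application code, assuming the
// application supplies its own naming.Resolver implementation (here called
// "myResolver", a placeholder for e.g. a DNS- or etcd-backed resolver):
//
//	conn, err := grpc.Dial(
//		"my-service",
//		grpc.WithBalancer(grpc.RoundRobin(myResolver)),
//		grpc.WithInsecure(),
//	)
//	if err != nil {
//		log.Fatalf("dial failed: %v", err)
//	}
//	defer conn.Close()
//
// RoundRobin watches the resolver for address updates and hands connected
// addresses to gRPC in rotation through Get().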

322
vendor/google.golang.org/grpc/balancer_test.go generated vendored Normal file
View file

@ -0,0 +1,322 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"fmt"
"math"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/naming"
)
type testWatcher struct {
// the channel to receive name resolution updates
update chan *naming.Update
// the side channel used to learn how many updates are in a batch
side chan int
// the channel to notify the update injector that the update reading is done
readDone chan int
}
func (w *testWatcher) Next() (updates []*naming.Update, err error) {
n := <-w.side
if n == 0 {
return nil, fmt.Errorf("w.side is closed")
}
for i := 0; i < n; i++ {
u := <-w.update
if u != nil {
updates = append(updates, u)
}
}
w.readDone <- 0
return
}
func (w *testWatcher) Close() {
}
// Inject naming resolution updates to the testWatcher.
func (w *testWatcher) inject(updates []*naming.Update) {
w.side <- len(updates)
for _, u := range updates {
w.update <- u
}
<-w.readDone
}
type testNameResolver struct {
w *testWatcher
addr string
}
func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) {
r.w = &testWatcher{
update: make(chan *naming.Update, 1),
side: make(chan int, 1),
readDone: make(chan int),
}
r.w.side <- 1
r.w.update <- &naming.Update{
Op: naming.Add,
Addr: r.addr,
}
go func() {
<-r.w.readDone
}()
return r.w, nil
}
func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver) {
var servers []*server
for i := 0; i < numServers; i++ {
s := newTestServer()
servers = append(servers, s)
go s.start(t, 0, maxStreams)
s.wait(t, 2*time.Second)
}
// Point to servers[0]
addr := "127.0.0.1:" + servers[0].port
return servers, &testNameResolver{
addr: addr,
}
}
func TestNameDiscovery(t *testing.T) {
// Start 2 servers on 2 ports.
numServers := 2
servers, r := startServers(t, numServers, math.MaxUint32)
cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
if err != nil {
t.Fatalf("Failed to create ClientConn: %v", err)
}
req := "port"
var reply string
if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port {
t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port)
}
// Inject the name resolution change to remove servers[0] and add servers[1].
var updates []*naming.Update
updates = append(updates, &naming.Update{
Op: naming.Delete,
Addr: "127.0.0.1:" + servers[0].port,
})
updates = append(updates, &naming.Update{
Op: naming.Add,
Addr: "127.0.0.1:" + servers[1].port,
})
r.w.inject(updates)
// Loop until the RPCs in flight talk to servers[1].
for {
if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port {
break
}
time.Sleep(10 * time.Millisecond)
}
cc.Close()
for i := 0; i < numServers; i++ {
servers[i].stop()
}
}
func TestEmptyAddrs(t *testing.T) {
servers, r := startServers(t, 1, math.MaxUint32)
cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
if err != nil {
t.Fatalf("Failed to create ClientConn: %v", err)
}
var reply string
if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse {
t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, <nil>", err, reply, expectedResponse)
}
// Inject name resolution change to remove the server so that there is no address
// available after that.
u := &naming.Update{
Op: naming.Delete,
Addr: "127.0.0.1:" + servers[0].port,
}
r.w.inject([]*naming.Update{u})
// Loop until the above updates apply.
for {
time.Sleep(10 * time.Millisecond)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond)
if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc); err != nil {
break
}
}
cc.Close()
servers[0].stop()
}
func TestRoundRobin(t *testing.T) {
// Start 3 servers on 3 ports.
numServers := 3
servers, r := startServers(t, numServers, math.MaxUint32)
cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
if err != nil {
t.Fatalf("Failed to create ClientConn: %v", err)
}
// Add servers[1] to the service discovery.
u := &naming.Update{
Op: naming.Add,
Addr: "127.0.0.1:" + servers[1].port,
}
r.w.inject([]*naming.Update{u})
req := "port"
var reply string
// Loop until servers[1] is up
for {
if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port {
break
}
time.Sleep(10 * time.Millisecond)
}
// Add servers[2] to the service discovery.
u = &naming.Update{
Op: naming.Add,
Addr: "127.0.0.1:" + servers[2].port,
}
r.w.inject([]*naming.Update{u})
// Loop until servers[2] is up.
for {
if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port {
break
}
time.Sleep(10 * time.Millisecond)
}
// Check that the incoming RPCs are served in a round-robin manner.
for i := 0; i < 10; i++ {
if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[i%numServers].port {
t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", i, err, servers[i%numServers].port)
}
}
cc.Close()
for i := 0; i < numServers; i++ {
servers[i].stop()
}
}
func TestCloseWithPendingRPC(t *testing.T) {
servers, r := startServers(t, 1, math.MaxUint32)
cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
if err != nil {
t.Fatalf("Failed to create ClientConn: %v", err)
}
var reply string
if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil {
t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port)
}
// Remove the server.
updates := []*naming.Update{&naming.Update{
Op: naming.Delete,
Addr: "127.0.0.1:" + servers[0].port,
}}
r.w.inject(updates)
// Loop until the above update applies.
for {
ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond)
if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded {
break
}
time.Sleep(10 * time.Millisecond)
}
// Issue 2 RPCs which should be completed with error status once cc is closed.
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
var reply string
if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil {
t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err)
}
}()
go func() {
defer wg.Done()
var reply string
time.Sleep(5 * time.Millisecond)
if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil {
t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err)
}
}()
time.Sleep(5 * time.Millisecond)
cc.Close()
wg.Wait()
servers[0].stop()
}
func TestGetOnWaitChannel(t *testing.T) {
servers, r := startServers(t, 1, math.MaxUint32)
cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
if err != nil {
t.Fatalf("Failed to create ClientConn: %v", err)
}
// Remove all servers so that all upcoming RPCs will block on waitCh.
updates := []*naming.Update{&naming.Update{
Op: naming.Delete,
Addr: "127.0.0.1:" + servers[0].port,
}}
r.w.inject(updates)
for {
var reply string
ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond)
if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded {
break
}
time.Sleep(10 * time.Millisecond)
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
var reply string
if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil {
t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want <nil>", err)
}
}()
// Add a connected server to get the above RPC through.
updates = []*naming.Update{&naming.Update{
Op: naming.Add,
Addr: "127.0.0.1:" + servers[0].port,
}}
r.w.inject(updates)
// Wait until the above RPC succeeds.
wg.Wait()
cc.Close()
servers[0].stop()
}

224
vendor/google.golang.org/grpc/benchmark/benchmark.go generated vendored Normal file
View file

@ -0,0 +1,224 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Package benchmark implements the building blocks to set up end-to-end gRPC benchmarks.
*/
package benchmark
import (
"fmt"
"io"
"net"
"golang.org/x/net/context"
"google.golang.org/grpc"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
"google.golang.org/grpc/grpclog"
)
func newPayload(t testpb.PayloadType, size int) *testpb.Payload {
if size < 0 {
grpclog.Fatalf("Requested a response with invalid length %d", size)
}
body := make([]byte, size)
switch t {
case testpb.PayloadType_COMPRESSABLE:
case testpb.PayloadType_UNCOMPRESSABLE:
grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
default:
grpclog.Fatalf("Unsupported payload type: %d", t)
}
return &testpb.Payload{
Type: t,
Body: body,
}
}
type testServer struct {
}
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
return &testpb.SimpleResponse{
Payload: newPayload(in.ResponseType, int(in.ResponseSize)),
}, nil
}
func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error {
for {
in, err := stream.Recv()
if err == io.EOF {
// read done.
return nil
}
if err != nil {
return err
}
if err := stream.Send(&testpb.SimpleResponse{
Payload: newPayload(in.ResponseType, int(in.ResponseSize)),
}); err != nil {
return err
}
}
}
// byteBufServer is a gRPC server that sends and receives byte buffers.
// The purpose is to benchmark gRPC performance without protobuf serialization/deserialization overhead.
type byteBufServer struct {
respSize int32
}
// UnaryCall is an empty function and is not used for benchmarking.
// If a bytebuf UnaryCall benchmark is needed later, the function body needs to be updated.
func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
return &testpb.SimpleResponse{}, nil
}
func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error {
for {
var in []byte
err := stream.(grpc.ServerStream).RecvMsg(&in)
if err == io.EOF {
return nil
}
if err != nil {
return err
}
out := make([]byte, s.respSize)
if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil {
return err
}
}
}
// ServerInfo contains the information to create a gRPC benchmark server.
type ServerInfo struct {
// Addr is the address of the server.
Addr string
// Type is the type of the server.
// It should be "protobuf" or "bytebuf".
Type string
// Metadata is an optional configuration.
// For "protobuf", it's ignored.
// For "bytebuf", it should be an int representing response size.
Metadata interface{}
}
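// For example, a "bytebuf" server with 1 KiB responses would be configured as
// follows (note that StartServer below asserts the metadata to int32):
//
//	info := ServerInfo{Addr: "localhost:0", Type: "bytebuf", Metadata: int32(1024)}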
// StartServer starts a gRPC server serving a benchmark service according to info.
// It returns its listen address and a function to stop the server.
func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) {
lis, err := net.Listen("tcp", info.Addr)
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
s := grpc.NewServer(opts...)
switch info.Type {
case "protobuf":
testpb.RegisterBenchmarkServiceServer(s, &testServer{})
case "bytebuf":
respSize, ok := info.Metadata.(int32)
if !ok {
grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type)
}
testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize})
default:
grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type)
}
go s.Serve(lis)
return lis.Addr().String(), func() {
s.Stop()
}
}
// DoUnaryCall performs a unary RPC with the given stub and request and response sizes.
func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error {
pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
req := &testpb.SimpleRequest{
ResponseType: pl.Type,
ResponseSize: int32(respSize),
Payload: pl,
}
if _, err := tc.UnaryCall(context.Background(), req); err != nil {
return fmt.Errorf("/BenchmarkService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
}
return nil
}
// DoStreamingRoundTrip performs a round trip for a single streaming rpc.
func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error {
pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
req := &testpb.SimpleRequest{
ResponseType: pl.Type,
ResponseSize: int32(respSize),
Payload: pl,
}
if err := stream.Send(req); err != nil {
return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want <nil>", err)
}
if _, err := stream.Recv(); err != nil {
// EOF is a valid error here.
if err == io.EOF {
return nil
}
return fmt.Errorf("/BenchmarkService/StreamingCall.Recv(_) = %v, want <nil>", err)
}
return nil
}
// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using a custom codec for byte buffer.
func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error {
out := make([]byte, reqSize)
if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil {
return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).SendMsg(_) = %v, want <nil>", err)
}
var in []byte
if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil {
// EOF is a valid error here.
if err == io.EOF {
return nil
}
return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).RecvMsg(_) = %v, want <nil>", err)
}
return nil
}
// NewClientConn creates a gRPC client connection to addr.
func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn {
conn, err := grpc.Dial(addr, opts...)
if err != nil {
grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err)
}
return conn
}
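// A minimal sketch tying these building blocks together from a driver or test
// package (assuming imports of this package as "benchmark", its grpc_testing
// stubs as "testpb", "google.golang.org/grpc", and "google.golang.org/grpc/grpclog"):
//
//	addr, stop := benchmark.StartServer(benchmark.ServerInfo{Addr: "localhost:0", Type: "protobuf"})
//	defer stop()
//	conn := benchmark.NewClientConn(addr, grpc.WithInsecure())
//	defer conn.Close()
//	tc := testpb.NewBenchmarkServiceClient(conn)
//	if err := benchmark.DoUnaryCall(tc, 1, 1); err != nil {
//		grpclog.Fatalf("unary call failed: %v", err)
//	}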

View file

@ -0,0 +1,202 @@
package benchmark
import (
"os"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
"google.golang.org/grpc/benchmark/stats"
"google.golang.org/grpc/grpclog"
)
func runUnary(b *testing.B, maxConcurrentCalls int) {
s := stats.AddStats(b, 38)
b.StopTimer()
target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"})
defer stopper()
conn := NewClientConn(target, grpc.WithInsecure())
tc := testpb.NewBenchmarkServiceClient(conn)
// Warm up connection.
for i := 0; i < 10; i++ {
unaryCaller(tc)
}
ch := make(chan int, maxConcurrentCalls*4)
var (
mu sync.Mutex
wg sync.WaitGroup
)
wg.Add(maxConcurrentCalls)
// Distribute the b.N calls over maxConcurrentCalls workers.
for i := 0; i < maxConcurrentCalls; i++ {
go func() {
for range ch {
start := time.Now()
unaryCaller(tc)
elapse := time.Since(start)
mu.Lock()
s.Add(elapse)
mu.Unlock()
}
wg.Done()
}()
}
b.StartTimer()
for i := 0; i < b.N; i++ {
ch <- i
}
b.StopTimer()
close(ch)
wg.Wait()
conn.Close()
}
func runStream(b *testing.B, maxConcurrentCalls int) {
s := stats.AddStats(b, 38)
b.StopTimer()
target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"})
defer stopper()
conn := NewClientConn(target, grpc.WithInsecure())
tc := testpb.NewBenchmarkServiceClient(conn)
// Warm up connection.
stream, err := tc.StreamingCall(context.Background())
if err != nil {
b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
}
for i := 0; i < 10; i++ {
streamCaller(stream)
}
ch := make(chan int, maxConcurrentCalls*4)
var (
mu sync.Mutex
wg sync.WaitGroup
)
wg.Add(maxConcurrentCalls)
// Distribute the b.N calls over maxConcurrentCalls workers.
for i := 0; i < maxConcurrentCalls; i++ {
go func() {
stream, err := tc.StreamingCall(context.Background())
if err != nil {
b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
}
for range ch {
start := time.Now()
streamCaller(stream)
elapse := time.Since(start)
mu.Lock()
s.Add(elapse)
mu.Unlock()
}
wg.Done()
}()
}
b.StartTimer()
for i := 0; i < b.N; i++ {
ch <- i
}
b.StopTimer()
close(ch)
wg.Wait()
conn.Close()
}
func unaryCaller(client testpb.BenchmarkServiceClient) {
if err := DoUnaryCall(client, 1, 1); err != nil {
grpclog.Fatalf("DoUnaryCall failed: %v", err)
}
}
func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) {
if err := DoStreamingRoundTrip(stream, 1, 1); err != nil {
grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
}
}
func BenchmarkClientStreamc1(b *testing.B) {
grpc.EnableTracing = true
runStream(b, 1)
}
func BenchmarkClientStreamc8(b *testing.B) {
grpc.EnableTracing = true
runStream(b, 8)
}
func BenchmarkClientStreamc64(b *testing.B) {
grpc.EnableTracing = true
runStream(b, 64)
}
func BenchmarkClientStreamc512(b *testing.B) {
grpc.EnableTracing = true
runStream(b, 512)
}
func BenchmarkClientUnaryc1(b *testing.B) {
grpc.EnableTracing = true
runUnary(b, 1)
}
func BenchmarkClientUnaryc8(b *testing.B) {
grpc.EnableTracing = true
runUnary(b, 8)
}
func BenchmarkClientUnaryc64(b *testing.B) {
grpc.EnableTracing = true
runUnary(b, 64)
}
func BenchmarkClientUnaryc512(b *testing.B) {
grpc.EnableTracing = true
runUnary(b, 512)
}
func BenchmarkClientStreamNoTracec1(b *testing.B) {
grpc.EnableTracing = false
runStream(b, 1)
}
func BenchmarkClientStreamNoTracec8(b *testing.B) {
grpc.EnableTracing = false
runStream(b, 8)
}
func BenchmarkClientStreamNoTracec64(b *testing.B) {
grpc.EnableTracing = false
runStream(b, 64)
}
func BenchmarkClientStreamNoTracec512(b *testing.B) {
grpc.EnableTracing = false
runStream(b, 512)
}
func BenchmarkClientUnaryNoTracec1(b *testing.B) {
grpc.EnableTracing = false
runUnary(b, 1)
}
func BenchmarkClientUnaryNoTracec8(b *testing.B) {
grpc.EnableTracing = false
runUnary(b, 8)
}
func BenchmarkClientUnaryNoTracec64(b *testing.B) {
grpc.EnableTracing = false
runUnary(b, 64)
}
func BenchmarkClientUnaryNoTracec512(b *testing.B) {
grpc.EnableTracing = false
runUnary(b, 512)
}
func TestMain(m *testing.M) {
os.Exit(stats.RunTestMain(m))
}

162
vendor/google.golang.org/grpc/benchmark/client/main.go generated vendored Normal file
View file

@ -0,0 +1,162 @@
package main
import (
"flag"
"math"
"net"
"net/http"
_ "net/http/pprof"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/benchmark"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
"google.golang.org/grpc/benchmark/stats"
"google.golang.org/grpc/grpclog"
)
var (
server = flag.String("server", "", "The server address")
maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs")
duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client")
trace = flag.Bool("trace", true, "Whether tracing is on")
rpcType = flag.Int("rpc_type", 0,
`Configure the client RPC type. Valid options are:
0 : unary call;
1 : streaming call.`)
)
func unaryCaller(client testpb.BenchmarkServiceClient) {
benchmark.DoUnaryCall(client, 1, 1)
}
func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) {
benchmark.DoStreamingRoundTrip(stream, 1, 1)
}
func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.BenchmarkServiceClient) {
s = stats.NewStats(256)
conn = benchmark.NewClientConn(*server)
tc = testpb.NewBenchmarkServiceClient(conn)
return s, conn, tc
}
func closeLoopUnary() {
s, conn, tc := buildConnection()
for i := 0; i < 100; i++ {
unaryCaller(tc)
}
ch := make(chan int, *maxConcurrentRPCs*4)
var (
mu sync.Mutex
wg sync.WaitGroup
)
wg.Add(*maxConcurrentRPCs)
for i := 0; i < *maxConcurrentRPCs; i++ {
go func() {
for range ch {
start := time.Now()
unaryCaller(tc)
elapse := time.Since(start)
mu.Lock()
s.Add(elapse)
mu.Unlock()
}
wg.Done()
}()
}
// Stop the client when time is up.
done := make(chan struct{})
go func() {
<-time.After(time.Duration(*duration) * time.Second)
close(done)
}()
ok := true
for ok {
select {
case ch <- 0:
case <-done:
ok = false
}
}
close(ch)
wg.Wait()
conn.Close()
grpclog.Println(s.String())
}
func closeLoopStream() {
s, conn, tc := buildConnection()
ch := make(chan int, *maxConcurrentRPCs*4)
var (
mu sync.Mutex
wg sync.WaitGroup
)
wg.Add(*maxConcurrentRPCs)
// Distribute RPCs over *maxConcurrentRPCs workers.
for i := 0; i < *maxConcurrentRPCs; i++ {
go func() {
stream, err := tc.StreamingCall(context.Background())
if err != nil {
grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
}
// Do some warm up.
for i := 0; i < 100; i++ {
streamCaller(stream)
}
for range ch {
start := time.Now()
streamCaller(stream)
elapse := time.Since(start)
mu.Lock()
s.Add(elapse)
mu.Unlock()
}
wg.Done()
}()
}
// Stop the client when time is up.
done := make(chan struct{})
go func() {
<-time.After(time.Duration(*duration) * time.Second)
close(done)
}()
ok := true
for ok {
select {
case ch <- 0:
case <-done:
ok = false
}
}
close(ch)
wg.Wait()
conn.Close()
grpclog.Println(s.String())
}
func main() {
flag.Parse()
grpc.EnableTracing = *trace
go func() {
lis, err := net.Listen("tcp", ":0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
grpclog.Println("Client profiling address: ", lis.Addr().String())
if err := http.Serve(lis, nil); err != nil {
grpclog.Fatalf("Failed to serve: %v", err)
}
}()
switch *rpcType {
case 0:
closeLoopUnary()
case 1:
closeLoopStream()
}
}
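// An illustrative invocation of this client from this directory (flag values
// are arbitrary; -server must point at a running benchmark server):
//
//	go run main.go -server=localhost:50051 -max_concurrent_rpcs=8 -duration=30 -rpc_type=1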

View file

@ -0,0 +1,977 @@
// Code generated by protoc-gen-go.
// source: control.proto
// DO NOT EDIT!
/*
Package grpc_testing is a generated protocol buffer package.
It is generated from these files:
control.proto
messages.proto
payloads.proto
services.proto
stats.proto
It has these top-level messages:
PoissonParams
UniformParams
DeterministicParams
ParetoParams
ClosedLoopParams
LoadParams
SecurityParams
ClientConfig
ClientStatus
Mark
ClientArgs
ServerConfig
ServerArgs
ServerStatus
CoreRequest
CoreResponse
Void
Scenario
Scenarios
Payload
EchoStatus
SimpleRequest
SimpleResponse
StreamingInputCallRequest
StreamingInputCallResponse
ResponseParameters
StreamingOutputCallRequest
StreamingOutputCallResponse
ReconnectParams
ReconnectInfo
ByteBufferParams
SimpleProtoParams
ComplexProtoParams
PayloadConfig
ServerStats
HistogramParams
HistogramData
ClientStats
*/
package grpc_testing
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ClientType int32
const (
ClientType_SYNC_CLIENT ClientType = 0
ClientType_ASYNC_CLIENT ClientType = 1
)
var ClientType_name = map[int32]string{
0: "SYNC_CLIENT",
1: "ASYNC_CLIENT",
}
var ClientType_value = map[string]int32{
"SYNC_CLIENT": 0,
"ASYNC_CLIENT": 1,
}
func (x ClientType) String() string {
return proto.EnumName(ClientType_name, int32(x))
}
func (ClientType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type ServerType int32
const (
ServerType_SYNC_SERVER ServerType = 0
ServerType_ASYNC_SERVER ServerType = 1
ServerType_ASYNC_GENERIC_SERVER ServerType = 2
)
var ServerType_name = map[int32]string{
0: "SYNC_SERVER",
1: "ASYNC_SERVER",
2: "ASYNC_GENERIC_SERVER",
}
var ServerType_value = map[string]int32{
"SYNC_SERVER": 0,
"ASYNC_SERVER": 1,
"ASYNC_GENERIC_SERVER": 2,
}
func (x ServerType) String() string {
return proto.EnumName(ServerType_name, int32(x))
}
func (ServerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type RpcType int32
const (
RpcType_UNARY RpcType = 0
RpcType_STREAMING RpcType = 1
)
var RpcType_name = map[int32]string{
0: "UNARY",
1: "STREAMING",
}
var RpcType_value = map[string]int32{
"UNARY": 0,
"STREAMING": 1,
}
func (x RpcType) String() string {
return proto.EnumName(RpcType_name, int32(x))
}
func (RpcType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// Parameters of poisson process distribution, which is a good representation
// of activity coming in from independent identical stationary sources.
type PoissonParams struct {
// The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"`
}
func (m *PoissonParams) Reset() { *m = PoissonParams{} }
func (m *PoissonParams) String() string { return proto.CompactTextString(m) }
func (*PoissonParams) ProtoMessage() {}
func (*PoissonParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type UniformParams struct {
InterarrivalLo float64 `protobuf:"fixed64,1,opt,name=interarrival_lo,json=interarrivalLo" json:"interarrival_lo,omitempty"`
InterarrivalHi float64 `protobuf:"fixed64,2,opt,name=interarrival_hi,json=interarrivalHi" json:"interarrival_hi,omitempty"`
}
func (m *UniformParams) Reset() { *m = UniformParams{} }
func (m *UniformParams) String() string { return proto.CompactTextString(m) }
func (*UniformParams) ProtoMessage() {}
func (*UniformParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type DeterministicParams struct {
OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"`
}
func (m *DeterministicParams) Reset() { *m = DeterministicParams{} }
func (m *DeterministicParams) String() string { return proto.CompactTextString(m) }
func (*DeterministicParams) ProtoMessage() {}
func (*DeterministicParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type ParetoParams struct {
InterarrivalBase float64 `protobuf:"fixed64,1,opt,name=interarrival_base,json=interarrivalBase" json:"interarrival_base,omitempty"`
Alpha float64 `protobuf:"fixed64,2,opt,name=alpha" json:"alpha,omitempty"`
}
func (m *ParetoParams) Reset() { *m = ParetoParams{} }
func (m *ParetoParams) String() string { return proto.CompactTextString(m) }
func (*ParetoParams) ProtoMessage() {}
func (*ParetoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
type ClosedLoopParams struct {
}
func (m *ClosedLoopParams) Reset() { *m = ClosedLoopParams{} }
func (m *ClosedLoopParams) String() string { return proto.CompactTextString(m) }
func (*ClosedLoopParams) ProtoMessage() {}
func (*ClosedLoopParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
type LoadParams struct {
// Types that are valid to be assigned to Load:
// *LoadParams_ClosedLoop
// *LoadParams_Poisson
// *LoadParams_Uniform
// *LoadParams_Determ
// *LoadParams_Pareto
Load isLoadParams_Load `protobuf_oneof:"load"`
}
func (m *LoadParams) Reset() { *m = LoadParams{} }
func (m *LoadParams) String() string { return proto.CompactTextString(m) }
func (*LoadParams) ProtoMessage() {}
func (*LoadParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
type isLoadParams_Load interface {
isLoadParams_Load()
}
type LoadParams_ClosedLoop struct {
ClosedLoop *ClosedLoopParams `protobuf:"bytes,1,opt,name=closed_loop,json=closedLoop,oneof"`
}
type LoadParams_Poisson struct {
Poisson *PoissonParams `protobuf:"bytes,2,opt,name=poisson,oneof"`
}
type LoadParams_Uniform struct {
Uniform *UniformParams `protobuf:"bytes,3,opt,name=uniform,oneof"`
}
type LoadParams_Determ struct {
Determ *DeterministicParams `protobuf:"bytes,4,opt,name=determ,oneof"`
}
type LoadParams_Pareto struct {
Pareto *ParetoParams `protobuf:"bytes,5,opt,name=pareto,oneof"`
}
func (*LoadParams_ClosedLoop) isLoadParams_Load() {}
func (*LoadParams_Poisson) isLoadParams_Load() {}
func (*LoadParams_Uniform) isLoadParams_Load() {}
func (*LoadParams_Determ) isLoadParams_Load() {}
func (*LoadParams_Pareto) isLoadParams_Load() {}
func (m *LoadParams) GetLoad() isLoadParams_Load {
if m != nil {
return m.Load
}
return nil
}
func (m *LoadParams) GetClosedLoop() *ClosedLoopParams {
if x, ok := m.GetLoad().(*LoadParams_ClosedLoop); ok {
return x.ClosedLoop
}
return nil
}
func (m *LoadParams) GetPoisson() *PoissonParams {
if x, ok := m.GetLoad().(*LoadParams_Poisson); ok {
return x.Poisson
}
return nil
}
func (m *LoadParams) GetUniform() *UniformParams {
if x, ok := m.GetLoad().(*LoadParams_Uniform); ok {
return x.Uniform
}
return nil
}
func (m *LoadParams) GetDeterm() *DeterministicParams {
if x, ok := m.GetLoad().(*LoadParams_Determ); ok {
return x.Determ
}
return nil
}
func (m *LoadParams) GetPareto() *ParetoParams {
if x, ok := m.GetLoad().(*LoadParams_Pareto); ok {
return x.Pareto
}
return nil
}
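// A short sketch of populating and reading back the Load oneof; the wrapper
// types above select which branch is set (the value is arbitrary and for
// illustration only):
//
//	lp := &LoadParams{
//		Load: &LoadParams_Poisson{Poisson: &PoissonParams{OfferedLoad: 100}},
//	}
//	if p := lp.GetPoisson(); p != nil {
//		fmt.Println(p.OfferedLoad) // prints 100
//	}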
// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadParams) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _LoadParams_OneofMarshaler, _LoadParams_OneofUnmarshaler, _LoadParams_OneofSizer, []interface{}{
(*LoadParams_ClosedLoop)(nil),
(*LoadParams_Poisson)(nil),
(*LoadParams_Uniform)(nil),
(*LoadParams_Determ)(nil),
(*LoadParams_Pareto)(nil),
}
}
func _LoadParams_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*LoadParams)
// load
switch x := m.Load.(type) {
case *LoadParams_ClosedLoop:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ClosedLoop); err != nil {
return err
}
case *LoadParams_Poisson:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Poisson); err != nil {
return err
}
case *LoadParams_Uniform:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Uniform); err != nil {
return err
}
case *LoadParams_Determ:
b.EncodeVarint(4<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Determ); err != nil {
return err
}
case *LoadParams_Pareto:
b.EncodeVarint(5<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Pareto); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("LoadParams.Load has unexpected type %T", x)
}
return nil
}
func _LoadParams_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*LoadParams)
switch tag {
case 1: // load.closed_loop
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClosedLoopParams)
err := b.DecodeMessage(msg)
m.Load = &LoadParams_ClosedLoop{msg}
return true, err
case 2: // load.poisson
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(PoissonParams)
err := b.DecodeMessage(msg)
m.Load = &LoadParams_Poisson{msg}
return true, err
case 3: // load.uniform
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(UniformParams)
err := b.DecodeMessage(msg)
m.Load = &LoadParams_Uniform{msg}
return true, err
case 4: // load.determ
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(DeterministicParams)
err := b.DecodeMessage(msg)
m.Load = &LoadParams_Determ{msg}
return true, err
case 5: // load.pareto
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ParetoParams)
err := b.DecodeMessage(msg)
m.Load = &LoadParams_Pareto{msg}
return true, err
default:
return false, nil
}
}
func _LoadParams_OneofSizer(msg proto.Message) (n int) {
m := msg.(*LoadParams)
// load
switch x := m.Load.(type) {
case *LoadParams_ClosedLoop:
s := proto.Size(x.ClosedLoop)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadParams_Poisson:
s := proto.Size(x.Poisson)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadParams_Uniform:
s := proto.Size(x.Uniform)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadParams_Determ:
s := proto.Size(x.Determ)
n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadParams_Pareto:
s := proto.Size(x.Pareto)
n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// presence of SecurityParams implies use of TLS
type SecurityParams struct {
UseTestCa bool `protobuf:"varint,1,opt,name=use_test_ca,json=useTestCa" json:"use_test_ca,omitempty"`
ServerHostOverride string `protobuf:"bytes,2,opt,name=server_host_override,json=serverHostOverride" json:"server_host_override,omitempty"`
}
func (m *SecurityParams) Reset() { *m = SecurityParams{} }
func (m *SecurityParams) String() string { return proto.CompactTextString(m) }
func (*SecurityParams) ProtoMessage() {}
func (*SecurityParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type ClientConfig struct {
// List of targets to connect to. At least one target needs to be specified.
ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets,json=serverTargets" json:"server_targets,omitempty"`
ClientType ClientType `protobuf:"varint,2,opt,name=client_type,json=clientType,enum=grpc.testing.ClientType" json:"client_type,omitempty"`
SecurityParams *SecurityParams `protobuf:"bytes,3,opt,name=security_params,json=securityParams" json:"security_params,omitempty"`
// How many concurrent RPCs to start for each channel.
// For synchronous client, use a separate thread for each outstanding RPC.
OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel,json=outstandingRpcsPerChannel" json:"outstanding_rpcs_per_channel,omitempty"`
// Number of independent client channels to create.
// i-th channel will connect to server_target[i % server_targets.size()]
ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels,json=clientChannels" json:"client_channels,omitempty"`
// Only for async client. Number of threads to use to start/manage RPCs.
AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads,json=asyncClientThreads" json:"async_client_threads,omitempty"`
RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,json=rpcType,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"`
// The requested load for the entire client (aggregated over all the threads).
LoadParams *LoadParams `protobuf:"bytes,10,opt,name=load_params,json=loadParams" json:"load_params,omitempty"`
PayloadConfig *PayloadConfig `protobuf:"bytes,11,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"`
HistogramParams *HistogramParams `protobuf:"bytes,12,opt,name=histogram_params,json=histogramParams" json:"histogram_params,omitempty"`
// Specify the cores we should run the client on, if desired
CoreList []int32 `protobuf:"varint,13,rep,name=core_list,json=coreList" json:"core_list,omitempty"`
CoreLimit int32 `protobuf:"varint,14,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"`
}
func (m *ClientConfig) Reset() { *m = ClientConfig{} }
func (m *ClientConfig) String() string { return proto.CompactTextString(m) }
func (*ClientConfig) ProtoMessage() {}
func (*ClientConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ClientConfig) GetSecurityParams() *SecurityParams {
if m != nil {
return m.SecurityParams
}
return nil
}
func (m *ClientConfig) GetLoadParams() *LoadParams {
if m != nil {
return m.LoadParams
}
return nil
}
func (m *ClientConfig) GetPayloadConfig() *PayloadConfig {
if m != nil {
return m.PayloadConfig
}
return nil
}
func (m *ClientConfig) GetHistogramParams() *HistogramParams {
if m != nil {
return m.HistogramParams
}
return nil
}
type ClientStatus struct {
Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
}
func (m *ClientStatus) Reset() { *m = ClientStatus{} }
func (m *ClientStatus) String() string { return proto.CompactTextString(m) }
func (*ClientStatus) ProtoMessage() {}
func (*ClientStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *ClientStatus) GetStats() *ClientStats {
if m != nil {
return m.Stats
}
return nil
}
// Request current stats
type Mark struct {
// if true, the stats will be reset after taking their snapshot.
Reset_ bool `protobuf:"varint,1,opt,name=reset" json:"reset,omitempty"`
}
func (m *Mark) Reset() { *m = Mark{} }
func (m *Mark) String() string { return proto.CompactTextString(m) }
func (*Mark) ProtoMessage() {}
func (*Mark) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
type ClientArgs struct {
// Types that are valid to be assigned to Argtype:
// *ClientArgs_Setup
// *ClientArgs_Mark
Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"`
}
func (m *ClientArgs) Reset() { *m = ClientArgs{} }
func (m *ClientArgs) String() string { return proto.CompactTextString(m) }
func (*ClientArgs) ProtoMessage() {}
func (*ClientArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
type isClientArgs_Argtype interface {
isClientArgs_Argtype()
}
type ClientArgs_Setup struct {
Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup,oneof"`
}
type ClientArgs_Mark struct {
Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"`
}
func (*ClientArgs_Setup) isClientArgs_Argtype() {}
func (*ClientArgs_Mark) isClientArgs_Argtype() {}
func (m *ClientArgs) GetArgtype() isClientArgs_Argtype {
if m != nil {
return m.Argtype
}
return nil
}
func (m *ClientArgs) GetSetup() *ClientConfig {
if x, ok := m.GetArgtype().(*ClientArgs_Setup); ok {
return x.Setup
}
return nil
}
func (m *ClientArgs) GetMark() *Mark {
if x, ok := m.GetArgtype().(*ClientArgs_Mark); ok {
return x.Mark
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*ClientArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _ClientArgs_OneofMarshaler, _ClientArgs_OneofUnmarshaler, _ClientArgs_OneofSizer, []interface{}{
(*ClientArgs_Setup)(nil),
(*ClientArgs_Mark)(nil),
}
}
func _ClientArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*ClientArgs)
// argtype
switch x := m.Argtype.(type) {
case *ClientArgs_Setup:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Setup); err != nil {
return err
}
case *ClientArgs_Mark:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Mark); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("ClientArgs.Argtype has unexpected type %T", x)
}
return nil
}
func _ClientArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*ClientArgs)
switch tag {
case 1: // argtype.setup
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClientConfig)
err := b.DecodeMessage(msg)
m.Argtype = &ClientArgs_Setup{msg}
return true, err
case 2: // argtype.mark
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Mark)
err := b.DecodeMessage(msg)
m.Argtype = &ClientArgs_Mark{msg}
return true, err
default:
return false, nil
}
}
func _ClientArgs_OneofSizer(msg proto.Message) (n int) {
m := msg.(*ClientArgs)
// argtype
switch x := m.Argtype.(type) {
case *ClientArgs_Setup:
s := proto.Size(x.Setup)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ClientArgs_Mark:
s := proto.Size(x.Mark)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ServerConfig struct {
ServerType ServerType `protobuf:"varint,1,opt,name=server_type,json=serverType,enum=grpc.testing.ServerType" json:"server_type,omitempty"`
SecurityParams *SecurityParams `protobuf:"bytes,2,opt,name=security_params,json=securityParams" json:"security_params,omitempty"`
// Port on which to listen. Zero means pick unused port.
Port int32 `protobuf:"varint,4,opt,name=port" json:"port,omitempty"`
// Only for async server. Number of threads used to serve the requests.
AsyncServerThreads int32 `protobuf:"varint,7,opt,name=async_server_threads,json=asyncServerThreads" json:"async_server_threads,omitempty"`
// Specify the number of cores to limit server to, if desired
CoreLimit int32 `protobuf:"varint,8,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"`
// payload config, used in generic server
PayloadConfig *PayloadConfig `protobuf:"bytes,9,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"`
// Specify the cores we should run the server on, if desired
CoreList []int32 `protobuf:"varint,10,rep,name=core_list,json=coreList" json:"core_list,omitempty"`
}
func (m *ServerConfig) Reset() { *m = ServerConfig{} }
func (m *ServerConfig) String() string { return proto.CompactTextString(m) }
func (*ServerConfig) ProtoMessage() {}
func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (m *ServerConfig) GetSecurityParams() *SecurityParams {
if m != nil {
return m.SecurityParams
}
return nil
}
func (m *ServerConfig) GetPayloadConfig() *PayloadConfig {
if m != nil {
return m.PayloadConfig
}
return nil
}
type ServerArgs struct {
// Types that are valid to be assigned to Argtype:
// *ServerArgs_Setup
// *ServerArgs_Mark
Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"`
}
func (m *ServerArgs) Reset() { *m = ServerArgs{} }
func (m *ServerArgs) String() string { return proto.CompactTextString(m) }
func (*ServerArgs) ProtoMessage() {}
func (*ServerArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
type isServerArgs_Argtype interface {
isServerArgs_Argtype()
}
type ServerArgs_Setup struct {
Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup,oneof"`
}
type ServerArgs_Mark struct {
Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"`
}
func (*ServerArgs_Setup) isServerArgs_Argtype() {}
func (*ServerArgs_Mark) isServerArgs_Argtype() {}
func (m *ServerArgs) GetArgtype() isServerArgs_Argtype {
if m != nil {
return m.Argtype
}
return nil
}
func (m *ServerArgs) GetSetup() *ServerConfig {
if x, ok := m.GetArgtype().(*ServerArgs_Setup); ok {
return x.Setup
}
return nil
}
func (m *ServerArgs) GetMark() *Mark {
if x, ok := m.GetArgtype().(*ServerArgs_Mark); ok {
return x.Mark
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*ServerArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _ServerArgs_OneofMarshaler, _ServerArgs_OneofUnmarshaler, _ServerArgs_OneofSizer, []interface{}{
(*ServerArgs_Setup)(nil),
(*ServerArgs_Mark)(nil),
}
}
func _ServerArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*ServerArgs)
// argtype
switch x := m.Argtype.(type) {
case *ServerArgs_Setup:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Setup); err != nil {
return err
}
case *ServerArgs_Mark:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Mark); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("ServerArgs.Argtype has unexpected type %T", x)
}
return nil
}
func _ServerArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*ServerArgs)
switch tag {
case 1: // argtype.setup
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ServerConfig)
err := b.DecodeMessage(msg)
m.Argtype = &ServerArgs_Setup{msg}
return true, err
case 2: // argtype.mark
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Mark)
err := b.DecodeMessage(msg)
m.Argtype = &ServerArgs_Mark{msg}
return true, err
default:
return false, nil
}
}
func _ServerArgs_OneofSizer(msg proto.Message) (n int) {
m := msg.(*ServerArgs)
// argtype
switch x := m.Argtype.(type) {
case *ServerArgs_Setup:
s := proto.Size(x.Setup)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ServerArgs_Mark:
s := proto.Size(x.Mark)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ServerStatus struct {
Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
// the port bound by the server
Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
// Number of cores available to the server
Cores int32 `protobuf:"varint,3,opt,name=cores" json:"cores,omitempty"`
}
func (m *ServerStatus) Reset() { *m = ServerStatus{} }
func (m *ServerStatus) String() string { return proto.CompactTextString(m) }
func (*ServerStatus) ProtoMessage() {}
func (*ServerStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
func (m *ServerStatus) GetStats() *ServerStats {
if m != nil {
return m.Stats
}
return nil
}
type CoreRequest struct {
}
func (m *CoreRequest) Reset() { *m = CoreRequest{} }
func (m *CoreRequest) String() string { return proto.CompactTextString(m) }
func (*CoreRequest) ProtoMessage() {}
func (*CoreRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
type CoreResponse struct {
// Number of cores available on the server
Cores int32 `protobuf:"varint,1,opt,name=cores" json:"cores,omitempty"`
}
func (m *CoreResponse) Reset() { *m = CoreResponse{} }
func (m *CoreResponse) String() string { return proto.CompactTextString(m) }
func (*CoreResponse) ProtoMessage() {}
func (*CoreResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
type Void struct {
}
func (m *Void) Reset() { *m = Void{} }
func (m *Void) String() string { return proto.CompactTextString(m) }
func (*Void) ProtoMessage() {}
func (*Void) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
// A single performance scenario: input to qps_json_driver
type Scenario struct {
// Human readable name for this scenario
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Client configuration
ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig" json:"client_config,omitempty"`
// Number of clients to start for the test
NumClients int32 `protobuf:"varint,3,opt,name=num_clients,json=numClients" json:"num_clients,omitempty"`
// Server configuration
ServerConfig *ServerConfig `protobuf:"bytes,4,opt,name=server_config,json=serverConfig" json:"server_config,omitempty"`
// Number of servers to start for the test
NumServers int32 `protobuf:"varint,5,opt,name=num_servers,json=numServers" json:"num_servers,omitempty"`
// Warmup period, in seconds
WarmupSeconds int32 `protobuf:"varint,6,opt,name=warmup_seconds,json=warmupSeconds" json:"warmup_seconds,omitempty"`
// Benchmark time, in seconds
BenchmarkSeconds int32 `protobuf:"varint,7,opt,name=benchmark_seconds,json=benchmarkSeconds" json:"benchmark_seconds,omitempty"`
// Number of workers to spawn locally (usually zero)
SpawnLocalWorkerCount int32 `protobuf:"varint,8,opt,name=spawn_local_worker_count,json=spawnLocalWorkerCount" json:"spawn_local_worker_count,omitempty"`
}
func (m *Scenario) Reset() { *m = Scenario{} }
func (m *Scenario) String() string { return proto.CompactTextString(m) }
func (*Scenario) ProtoMessage() {}
func (*Scenario) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
func (m *Scenario) GetClientConfig() *ClientConfig {
if m != nil {
return m.ClientConfig
}
return nil
}
func (m *Scenario) GetServerConfig() *ServerConfig {
if m != nil {
return m.ServerConfig
}
return nil
}
// A set of scenarios to be run with qps_json_driver
type Scenarios struct {
Scenarios []*Scenario `protobuf:"bytes,1,rep,name=scenarios" json:"scenarios,omitempty"`
}
func (m *Scenarios) Reset() { *m = Scenarios{} }
func (m *Scenarios) String() string { return proto.CompactTextString(m) }
func (*Scenarios) ProtoMessage() {}
func (*Scenarios) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *Scenarios) GetScenarios() []*Scenario {
if m != nil {
return m.Scenarios
}
return nil
}
func init() {
proto.RegisterType((*PoissonParams)(nil), "grpc.testing.PoissonParams")
proto.RegisterType((*UniformParams)(nil), "grpc.testing.UniformParams")
proto.RegisterType((*DeterministicParams)(nil), "grpc.testing.DeterministicParams")
proto.RegisterType((*ParetoParams)(nil), "grpc.testing.ParetoParams")
proto.RegisterType((*ClosedLoopParams)(nil), "grpc.testing.ClosedLoopParams")
proto.RegisterType((*LoadParams)(nil), "grpc.testing.LoadParams")
proto.RegisterType((*SecurityParams)(nil), "grpc.testing.SecurityParams")
proto.RegisterType((*ClientConfig)(nil), "grpc.testing.ClientConfig")
proto.RegisterType((*ClientStatus)(nil), "grpc.testing.ClientStatus")
proto.RegisterType((*Mark)(nil), "grpc.testing.Mark")
proto.RegisterType((*ClientArgs)(nil), "grpc.testing.ClientArgs")
proto.RegisterType((*ServerConfig)(nil), "grpc.testing.ServerConfig")
proto.RegisterType((*ServerArgs)(nil), "grpc.testing.ServerArgs")
proto.RegisterType((*ServerStatus)(nil), "grpc.testing.ServerStatus")
proto.RegisterType((*CoreRequest)(nil), "grpc.testing.CoreRequest")
proto.RegisterType((*CoreResponse)(nil), "grpc.testing.CoreResponse")
proto.RegisterType((*Void)(nil), "grpc.testing.Void")
proto.RegisterType((*Scenario)(nil), "grpc.testing.Scenario")
proto.RegisterType((*Scenarios)(nil), "grpc.testing.Scenarios")
proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value)
proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value)
proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value)
}
func init() { proto.RegisterFile("control.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 1162 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0xdb, 0x46,
0x13, 0x8d, 0x14, 0xc9, 0x96, 0x86, 0x92, 0xac, 0x6f, 0xbf, 0xa4, 0x60, 0x1c, 0x27, 0x6d, 0xd8,
0x16, 0x0d, 0x5c, 0xc0, 0x29, 0xd4, 0x02, 0x69, 0xd1, 0x8b, 0x40, 0x56, 0x85, 0xd8, 0x80, 0xe3,
0xba, 0x2b, 0x27, 0x45, 0xae, 0x08, 0x9a, 0x5a, 0x4b, 0x44, 0x24, 0x2e, 0xbb, 0x4b, 0xc6, 0xf0,
0x2b, 0xf4, 0x99, 0xfa, 0x1c, 0x7d, 0x8d, 0xbe, 0x42, 0x67, 0xff, 0x64, 0x52, 0x11, 0x10, 0xb7,
0xbd, 0xe3, 0xce, 0x9c, 0xb3, 0x3b, 0x3b, 0x67, 0x66, 0x96, 0xd0, 0x8d, 0x79, 0x9a, 0x0b, 0xbe,
0x38, 0xc8, 0x04, 0xcf, 0x39, 0xe9, 0xcc, 0x44, 0x16, 0x1f, 0xe4, 0x4c, 0xe6, 0x49, 0x3a, 0xdb,
0xed, 0x65, 0xd1, 0xf5, 0x82, 0x47, 0x53, 0x69, 0xbc, 0xbb, 0x9e, 0xcc, 0xa3, 0xdc, 0x2e, 0x82,
0x01, 0x74, 0xcf, 0x78, 0x22, 0x25, 0x4f, 0xcf, 0x22, 0x11, 0x2d, 0x25, 0x79, 0x02, 0x1d, 0x7e,
0x79, 0xc9, 0x04, 0x9b, 0x86, 0x8a, 0xe4, 0xd7, 0x3e, 0xab, 0x3d, 0xad, 0x51, 0xcf, 0xda, 0x4e,
0xd0, 0x14, 0x44, 0xd0, 0x7d, 0x9d, 0x26, 0x97, 0x5c, 0x2c, 0x2d, 0xe7, 0x2b, 0xd8, 0x49, 0xd2,
0x9c, 0x89, 0x48, 0x88, 0xe4, 0x7d, 0xb4, 0x40, 0xa2, 0xa5, 0xf5, 0xca, 0xe6, 0x13, 0xfe, 0x01,
0x70, 0x9e, 0xf8, 0xf5, 0x0f, 0x81, 0x47, 0x49, 0xf0, 0x3d, 0xfc, 0xff, 0x27, 0x86, 0x96, 0x65,
0x92, 0x26, 0x78, 0x8b, 0xf8, 0xf6, 0xc1, 0xfd, 0x02, 0x1d, 0x04, 0xb3, 0x9c, 0x5b, 0xca, 0xd7,
0xf0, 0xbf, 0xca, 0x91, 0x17, 0x91, 0x64, 0x96, 0xd7, 0x2f, 0x3b, 0x0e, 0xd1, 0x4e, 0xee, 0x41,
0x33, 0x5a, 0x64, 0xf3, 0xc8, 0x46, 0x65, 0x16, 0x01, 0x81, 0xfe, 0x68, 0xc1, 0xa5, 0x3a, 0x80,
0x67, 0x66, 0xdb, 0xe0, 0x8f, 0x3a, 0x80, 0x3a, 0xcf, 0x9e, 0x32, 0x04, 0x2f, 0xd6, 0x10, 0x8c,
0x8b, 0x67, 0x7a, 0x7f, 0x6f, 0xf0, 0xf8, 0xa0, 0xac, 0xc3, 0xc1, 0xfa, 0x1e, 0x47, 0x77, 0x28,
0xc4, 0x2b, 0x1b, 0x79, 0x0e, 0xdb, 0x99, 0x51, 0x42, 0x9f, 0xee, 0x0d, 0x1e, 0x56, 0xe9, 0x15,
0x99, 0x90, 0xeb, 0xd0, 0x8a, 0x58, 0x18, 0x39, 0xfc, 0xbb, 0x9b, 0x88, 0x15, 0xad, 0x14, 0xd1,
0xa2, 0xc9, 0x8f, 0xb0, 0x35, 0xd5, 0x49, 0xf6, 0x1b, 0x9a, 0xf7, 0xa4, 0xca, 0xdb, 0x20, 0x00,
0xb2, 0x2d, 0x85, 0x7c, 0x07, 0x5b, 0x99, 0xce, 0xb3, 0xdf, 0xd4, 0xe4, 0xdd, 0xb5, 0x68, 0x4b,
0x1a, 0x28, 0x96, 0xc1, 0x1e, 0x6e, 0x41, 0x43, 0x09, 0x17, 0x5c, 0x40, 0x6f, 0xc2, 0xe2, 0x42,
0x24, 0xf9, 0xb5, 0xcd, 0xe0, 0x63, 0xf0, 0x0a, 0xc9, 0x42, 0xc5, 0x0f, 0xe3, 0x48, 0x67, 0xb0,
0x45, 0xdb, 0x68, 0x3a, 0x47, 0xcb, 0x28, 0x22, 0xdf, 0xc0, 0x3d, 0xc9, 0xc4, 0x7b, 0x26, 0xc2,
0x39, 0x47, 0x08, 0xc7, 0x2f, 0x91, 0x4c, 0x99, 0xce, 0x55, 0x9b, 0x12, 0xe3, 0x3b, 0x42, 0xd7,
0xcf, 0xd6, 0x13, 0xfc, 0xde, 0x84, 0xce, 0x68, 0x91, 0xb0, 0x34, 0x1f, 0xf1, 0xf4, 0x32, 0x99,
0x91, 0x2f, 0xa1, 0x67, 0xb7, 0xc8, 0x23, 0x31, 0x63, 0xb9, 0xc4, 0x53, 0xee, 0x22, 0xb9, 0x6b,
0xac, 0xe7, 0xc6, 0x48, 0x7e, 0x50, 0x5a, 0x2a, 0x5a, 0x98, 0x5f, 0x67, 0xe6, 0x80, 0xde, 0xc0,
0x5f, 0xd7, 0x52, 0x01, 0xce, 0xd1, 0xaf, 0x34, 0x74, 0xdf, 0x64, 0x0c, 0x3b, 0xd2, 0x5e, 0x2b,
0xcc, 0xf4, 0xbd, 0xac, 0x24, 0x7b, 0x55, 0x7a, 0xf5, 0xee, 0xb4, 0x27, 0xab, 0xb9, 0x78, 0x01,
0x7b, 0xbc, 0xc8, 0xb1, 0x4d, 0xd3, 0x29, 0xa2, 0x43, 0x64, 0xca, 0x30, 0xc3, 0xb0, 0xe3, 0x79,
0x94, 0xa6, 0x6c, 0xa1, 0xe5, 0x6a, 0xd2, 0x07, 0x25, 0x0c, 0x45, 0xc8, 0x19, 0x13, 0x23, 0x03,
0x50, 0x7d, 0x66, 0xaf, 0x60, 0x29, 0x52, 0xab, 0xd4, 0xa4, 0x3d, 0x63, 0xb6, 0x38, 0xa9, 0xb2,
0x1a, 0xc9, 0xeb, 0x34, 0x0e, 0xdd, 0x8d, 0xe7, 0x82, 0xe1, 0xa4, 0xf0, 0xb7, 0x35, 0x9a, 0x68,
0x9f, 0xbd, 0xab, 0xf1, 0x20, 0xa3, 0x85, 0xf1, 0x98, 0xd4, 0xb4, 0x74, 0x6a, 0xee, 0x57, 0xef,
0x86, 0xa1, 0xe8, 0xbc, 0x6c, 0x0b, 0xf3, 0xa1, 0xf2, 0xa9, 0x34, 0x77, 0x09, 0x01, 0x9d, 0x90,
0xb5, 0x7c, 0xde, 0xb4, 0x12, 0x85, 0xc5, 0x4d, 0x5b, 0x1d, 0x82, 0x1b, 0x5e, 0x61, 0xac, 0x35,
0xf4, 0xbd, 0x8d, 0xad, 0x61, 0x30, 0x46, 0x66, 0xda, 0xcd, 0xca, 0x4b, 0x72, 0x04, 0xfd, 0x39,
0x96, 0x30, 0x9f, 0xe1, 0x8e, 0x2e, 0x86, 0x8e, 0xde, 0xe5, 0x51, 0x75, 0x97, 0x23, 0x87, 0xb2,
0x81, 0xec, 0xcc, 0xab, 0x06, 0xf2, 0x10, 0xda, 0x31, 0x17, 0x2c, 0x5c, 0xa0, 0xdd, 0xef, 0x62,
0xe9, 0x34, 0x69, 0x4b, 0x19, 0x4e, 0x70, 0x4d, 0x1e, 0x01, 0x58, 0xe7, 0x32, 0xc9, 0xfd, 0x9e,
0xce, 0x5f, 0xdb, 0x78, 0xd1, 0x10, 0xbc, 0x70, 0xb5, 0x38, 0xc1, 0xe1, 0x5b, 0x48, 0xf2, 0x0c,
0x9a, 0x7a, 0x0c, 0xdb, 0x51, 0xf1, 0x60, 0x53, 0x79, 0x29, 0xa8, 0xa4, 0x06, 0x17, 0xec, 0x41,
0xe3, 0x55, 0x24, 0xde, 0xa9, 0x11, 0x25, 0x98, 0x64, 0xb9, 0xed, 0x10, 0xb3, 0x08, 0x0a, 0x00,
0xc3, 0x19, 0x8a, 0x99, 0x24, 0x03, 0xdc, 0x9c, 0xe5, 0x85, 0x9b, 0x43, 0xbb, 0x9b, 0x36, 0x37,
0xd9, 0xc1, 0xd6, 0x34, 0x50, 0xf2, 0x14, 0x1a, 0x4b, 0xdc, 0xdf, 0xce, 0x1e, 0x52, 0xa5, 0xa8,
0x93, 0x11, 0xaa, 0x11, 0x87, 0x6d, 0xd8, 0xc6, 0x4e, 0x51, 0x05, 0x10, 0xfc, 0x59, 0x87, 0xce,
0x44, 0x37, 0x8f, 0x4d, 0x36, 0x6a, 0xed, 0x5a, 0x4c, 0x15, 0x48, 0x6d, 0x53, 0xef, 0x18, 0x82,
0xe9, 0x1d, 0xb9, 0xfa, 0xde, 0xd4, 0x3b, 0xf5, 0x7f, 0xd1, 0x3b, 0x04, 0x1a, 0x19, 0x17, 0xb9,
0xed, 0x11, 0xfd, 0x7d, 0x53, 0xe5, 0x2e, 0xb6, 0x0d, 0x55, 0x6e, 0xa3, 0xb2, 0x55, 0x5e, 0x55,
0xb3, 0xb5, 0xa6, 0xe6, 0x86, 0xba, 0x6c, 0xff, 0xe3, 0xba, 0xac, 0x54, 0x13, 0x54, 0xab, 0x49,
0xe9, 0x69, 0x02, 0xba, 0x85, 0x9e, 0x65, 0x01, 0xfe, 0xa3, 0x9e, 0x89, 0x93, 0xf3, 0x56, 0x55,
0x7a, 0x03, 0x75, 0x55, 0xba, 0xca, 0x7e, 0xbd, 0x94, 0x7d, 0xac, 0x58, 0x75, 0x2f, 0x33, 0x0a,
0x9b, 0xd4, 0x2c, 0x82, 0x2e, 0x78, 0x23, 0xfc, 0xa0, 0xec, 0xb7, 0x02, 0xb7, 0x0b, 0xbe, 0xc0,
0xfe, 0xd0, 0x4b, 0x99, 0xf1, 0xd4, 0xbc, 0xc4, 0x86, 0x54, 0x2b, 0x93, 0xf0, 0xf9, 0x78, 0xc3,
0x93, 0x69, 0xf0, 0x57, 0x1d, 0x5a, 0x93, 0x98, 0xa5, 0x91, 0x48, 0xb8, 0x3a, 0x33, 0x8d, 0x96,
0xa6, 0xd8, 0xda, 0x54, 0x7f, 0xe3, 0x04, 0xed, 0xba, 0x01, 0x68, 0xf4, 0xa9, 0x7f, 0xac, 0x13,
0x68, 0x27, 0x2e, 0xbf, 0x15, 0x9f, 0x82, 0x97, 0x16, 0x4b, 0x3b, 0x16, 0x5d, 0xe8, 0x80, 0x26,
0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0xee, 0x84, 0xc6, 0xc7, 0xb4, 0xa1, 0x1d, 0x59, 0x6e, 0x15,
0x7b, 0x82, 0xb1, 0xb9, 0xf9, 0xac, 0x4e, 0x30, 0x1c, 0xa9, 0x9e, 0xab, 0xab, 0x48, 0x2c, 0x8b,
0x0c, 0x31, 0x78, 0x06, 0xd6, 0xeb, 0x96, 0xc6, 0x74, 0x8d, 0x75, 0x62, 0x8c, 0xea, 0x07, 0xe7,
0x82, 0xa5, 0xf1, 0x5c, 0x69, 0xb9, 0x42, 0x9a, 0xca, 0xee, 0xaf, 0x1c, 0x0e, 0xfc, 0x1c, 0x7c,
0x99, 0x45, 0x57, 0x29, 0xfe, 0xa6, 0xc4, 0xf8, 0x33, 0x74, 0xc5, 0xc5, 0x3b, 0x7d, 0x83, 0x22,
0x75, 0x55, 0x7e, 0x5f, 0xfb, 0x4f, 0x94, 0xfb, 0x57, 0xed, 0x1d, 0x29, 0x67, 0x30, 0x84, 0xb6,
0x4b, 0xb8, 0xc4, 0xb7, 0xbf, 0x2d, 0xdd, 0x42, 0xbf, 0xa1, 0xde, 0xe0, 0x93, 0xb5, 0x7b, 0x5b,
0x37, 0xbd, 0x01, 0xee, 0x3f, 0x73, 0x33, 0x4a, 0xb7, 0xfb, 0x0e, 0x78, 0x93, 0xb7, 0xa7, 0xa3,
0x70, 0x74, 0x72, 0x3c, 0x3e, 0x3d, 0xef, 0xdf, 0x21, 0x7d, 0xe8, 0x0c, 0xcb, 0x96, 0xda, 0xfe,
0xb1, 0x6b, 0x82, 0x0a, 0x61, 0x32, 0xa6, 0x6f, 0xc6, 0xb4, 0x4c, 0xb0, 0x96, 0x1a, 0xf1, 0xe1,
0x9e, 0xb1, 0xbc, 0x1c, 0x9f, 0x8e, 0xe9, 0xf1, 0xca, 0x53, 0xdf, 0xff, 0x1c, 0xb6, 0xed, 0xbb,
0x44, 0xda, 0xd0, 0x7c, 0x7d, 0x3a, 0xa4, 0x6f, 0x71, 0x87, 0x2e, 0x5e, 0xea, 0x9c, 0x8e, 0x87,
0xaf, 0x8e, 0x4f, 0x5f, 0xf6, 0x6b, 0x17, 0x5b, 0xfa, 0x97, 0xf8, 0xdb, 0xbf, 0x03, 0x00, 0x00,
0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00,
}

View file

@ -0,0 +1,201 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
import "payloads.proto";
import "stats.proto";
package grpc.testing;
enum ClientType {
SYNC_CLIENT = 0;
ASYNC_CLIENT = 1;
}
enum ServerType {
SYNC_SERVER = 0;
ASYNC_SERVER = 1;
ASYNC_GENERIC_SERVER = 2;
}
enum RpcType {
UNARY = 0;
STREAMING = 1;
}
// Parameters of a Poisson process distribution, which is a good representation
// of activity coming in from independent identical stationary sources.
message PoissonParams {
// The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
double offered_load = 1;
}
message UniformParams {
double interarrival_lo = 1;
double interarrival_hi = 2;
}
message DeterministicParams {
double offered_load = 1;
}
message ParetoParams {
double interarrival_base = 1;
double alpha = 2;
}
// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
message ClosedLoopParams {
}
message LoadParams {
oneof load {
ClosedLoopParams closed_loop = 1;
PoissonParams poisson = 2;
UniformParams uniform = 3;
DeterministicParams determ = 4;
ParetoParams pareto = 5;
};
}
// presence of SecurityParams implies use of TLS
message SecurityParams {
bool use_test_ca = 1;
string server_host_override = 2;
}
message ClientConfig {
// List of targets to connect to. At least one target needs to be specified.
repeated string server_targets = 1;
ClientType client_type = 2;
SecurityParams security_params = 3;
// How many concurrent RPCs to start for each channel.
// For synchronous client, use a separate thread for each outstanding RPC.
int32 outstanding_rpcs_per_channel = 4;
// Number of independent client channels to create.
// i-th channel will connect to server_target[i % server_targets.size()]
int32 client_channels = 5;
// Only for async client. Number of threads to use to start/manage RPCs.
int32 async_client_threads = 7;
RpcType rpc_type = 8;
// The requested load for the entire client (aggregated over all the threads).
LoadParams load_params = 10;
PayloadConfig payload_config = 11;
HistogramParams histogram_params = 12;
// Specify the cores we should run the client on, if desired
repeated int32 core_list = 13;
int32 core_limit = 14;
}
message ClientStatus {
ClientStats stats = 1;
}
// Request current stats
message Mark {
// if true, the stats will be reset after taking their snapshot.
bool reset = 1;
}
message ClientArgs {
oneof argtype {
ClientConfig setup = 1;
Mark mark = 2;
}
}
message ServerConfig {
ServerType server_type = 1;
SecurityParams security_params = 2;
// Port on which to listen. Zero means pick unused port.
int32 port = 4;
// Only for async server. Number of threads used to serve the requests.
int32 async_server_threads = 7;
// Specify the number of cores to limit server to, if desired
int32 core_limit = 8;
// payload config, used in generic server
PayloadConfig payload_config = 9;
// Specify the cores we should run the server on, if desired
repeated int32 core_list = 10;
}
message ServerArgs {
oneof argtype {
ServerConfig setup = 1;
Mark mark = 2;
}
}
message ServerStatus {
ServerStats stats = 1;
// the port bound by the server
int32 port = 2;
// Number of cores available to the server
int32 cores = 3;
}
message CoreRequest {
}
message CoreResponse {
// Number of cores available on the server
int32 cores = 1;
}
message Void {
}
// A single performance scenario: input to qps_json_driver
message Scenario {
// Human readable name for this scenario
string name = 1;
// Client configuration
ClientConfig client_config = 2;
// Number of clients to start for the test
int32 num_clients = 3;
// Server configuration
ServerConfig server_config = 4;
// Number of servers to start for the test
int32 num_servers = 5;
// Warmup period, in seconds
int32 warmup_seconds = 6;
// Benchmark time, in seconds
int32 benchmark_seconds = 7;
// Number of workers to spawn locally (usually zero)
int32 spawn_local_worker_count = 8;
}
// A set of scenarios to be run with qps_json_driver
message Scenarios {
repeated Scenario scenarios = 1;
}
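
For orientation, here is a minimal, hypothetical sketch of how a driver might assemble a closed-loop unary ClientConfig from these messages in Go. Type and field names assume the standard protoc-gen-go output for control.proto and stats.proto (the generated files in this commit); the import path is an assumption and should be replaced with wherever this repo vendors the package.

package main

import (
	// Assumed import path for the generated grpc.testing package; adjust to
	// the actual vendored location in this repository.
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

// buildClientConfig sketches a closed-loop, unary benchmark client config.
func buildClientConfig(target string) *testpb.ClientConfig {
	return &testpb.ClientConfig{
		ServerTargets:             []string{target},
		ClientType:                testpb.ClientType_SYNC_CLIENT,
		RpcType:                   testpb.RpcType_UNARY,
		OutstandingRpcsPerChannel: 1,
		ClientChannels:            1,
		// Closed loop: start a new RPC as soon as the previous one finishes.
		LoadParams: &testpb.LoadParams{
			Load: &testpb.LoadParams_ClosedLoop{ClosedLoop: &testpb.ClosedLoopParams{}},
		},
		HistogramParams: &testpb.HistogramParams{Resolution: 0.01, MaxPossible: 60e9},
	}
}

func main() { _ = buildClientConfig("localhost:10000") }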

View file

@ -0,0 +1,347 @@
// Code generated by protoc-gen-go.
// source: messages.proto
// DO NOT EDIT!
package grpc_testing
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// The type of payload that should be returned.
type PayloadType int32
const (
// Compressable text format.
PayloadType_COMPRESSABLE PayloadType = 0
// Uncompressable binary format.
PayloadType_UNCOMPRESSABLE PayloadType = 1
// Randomly chosen from all other formats defined in this enum.
PayloadType_RANDOM PayloadType = 2
)
var PayloadType_name = map[int32]string{
0: "COMPRESSABLE",
1: "UNCOMPRESSABLE",
2: "RANDOM",
}
var PayloadType_value = map[string]int32{
"COMPRESSABLE": 0,
"UNCOMPRESSABLE": 1,
"RANDOM": 2,
}
func (x PayloadType) String() string {
return proto.EnumName(PayloadType_name, int32(x))
}
func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
// Compression algorithms
type CompressionType int32
const (
// No compression
CompressionType_NONE CompressionType = 0
CompressionType_GZIP CompressionType = 1
CompressionType_DEFLATE CompressionType = 2
)
var CompressionType_name = map[int32]string{
0: "NONE",
1: "GZIP",
2: "DEFLATE",
}
var CompressionType_value = map[string]int32{
"NONE": 0,
"GZIP": 1,
"DEFLATE": 2,
}
func (x CompressionType) String() string {
return proto.EnumName(CompressionType_name, int32(x))
}
func (CompressionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
// A block of data, to simply increase gRPC message size.
type Payload struct {
// The type of data in body.
Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"`
// Primary contents of payload.
Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
}
func (m *Payload) Reset() { *m = Payload{} }
func (m *Payload) String() string { return proto.CompactTextString(m) }
func (*Payload) ProtoMessage() {}
func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
// A protobuf representation for grpc status. This is used by test
// clients to specify a status that the server should attempt to return.
type EchoStatus struct {
Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
}
func (m *EchoStatus) Reset() { *m = EchoStatus{} }
func (m *EchoStatus) String() string { return proto.CompactTextString(m) }
func (*EchoStatus) ProtoMessage() {}
func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
// Unary request.
type SimpleRequest struct {
// Desired payload type in the response from the server.
// If response_type is RANDOM, server randomly chooses one from other formats.
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
// Desired payload size in the response from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"`
// Optional input payload sent along with the request.
Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"`
// Whether SimpleResponse should include username.
FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"`
// Whether SimpleResponse should include OAuth scope.
FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"`
// Compression algorithm to be used by the server for the response (stream)
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
// Whether server should return a given status
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"`
}
func (m *SimpleRequest) Reset() { *m = SimpleRequest{} }
func (m *SimpleRequest) String() string { return proto.CompactTextString(m) }
func (*SimpleRequest) ProtoMessage() {}
func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *SimpleRequest) GetPayload() *Payload {
if m != nil {
return m.Payload
}
return nil
}
func (m *SimpleRequest) GetResponseStatus() *EchoStatus {
if m != nil {
return m.ResponseStatus
}
return nil
}
// Unary response, as configured by the request.
type SimpleResponse struct {
// Payload to increase message size.
Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
// The user the request came from, for verifying authentication was
// successful when the client expected it.
Username string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"`
// OAuth scope.
OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"`
}
func (m *SimpleResponse) Reset() { *m = SimpleResponse{} }
func (m *SimpleResponse) String() string { return proto.CompactTextString(m) }
func (*SimpleResponse) ProtoMessage() {}
func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
func (m *SimpleResponse) GetPayload() *Payload {
if m != nil {
return m.Payload
}
return nil
}
// Client-streaming request.
type StreamingInputCallRequest struct {
// Optional input payload sent along with the request.
Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
}
func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} }
func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingInputCallRequest) ProtoMessage() {}
func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
func (m *StreamingInputCallRequest) GetPayload() *Payload {
if m != nil {
return m.Payload
}
return nil
}
// Client-streaming response.
type StreamingInputCallResponse struct {
// Aggregated size of payloads received from the client.
AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"`
}
func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} }
func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingInputCallResponse) ProtoMessage() {}
func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} }
// Configuration for a particular response.
type ResponseParameters struct {
// Desired payload sizes in responses from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
Size int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"`
// Desired interval between consecutive responses in the response stream in
// microseconds.
IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"`
}
func (m *ResponseParameters) Reset() { *m = ResponseParameters{} }
func (m *ResponseParameters) String() string { return proto.CompactTextString(m) }
func (*ResponseParameters) ProtoMessage() {}
func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} }
// Server-streaming request.
type StreamingOutputCallRequest struct {
// Desired payload type in the response from the server.
// If response_type is RANDOM, the payload from each response in the stream
// might be of different types. This is to simulate a mixed type of payload
// stream.
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
// Configuration for each expected response message.
ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"`
// Optional input payload sent along with the request.
Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"`
// Compression algorithm to be used by the server for the response (stream)
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
// Whether server should return a given status
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"`
}
func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} }
func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingOutputCallRequest) ProtoMessage() {}
func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} }
func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters {
if m != nil {
return m.ResponseParameters
}
return nil
}
func (m *StreamingOutputCallRequest) GetPayload() *Payload {
if m != nil {
return m.Payload
}
return nil
}
func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus {
if m != nil {
return m.ResponseStatus
}
return nil
}
// Server-streaming response, as configured by the request and parameters.
type StreamingOutputCallResponse struct {
// Payload to increase response size.
Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
}
func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} }
func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingOutputCallResponse) ProtoMessage() {}
func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} }
func (m *StreamingOutputCallResponse) GetPayload() *Payload {
if m != nil {
return m.Payload
}
return nil
}
// For reconnect interop test only.
// Client tells server what reconnection parameters it used.
type ReconnectParams struct {
MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs" json:"max_reconnect_backoff_ms,omitempty"`
}
func (m *ReconnectParams) Reset() { *m = ReconnectParams{} }
func (m *ReconnectParams) String() string { return proto.CompactTextString(m) }
func (*ReconnectParams) ProtoMessage() {}
func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} }
// For reconnect interop test only.
// Server tells client whether its reconnects are following the spec and the
// reconnect backoffs it saw.
type ReconnectInfo struct {
Passed bool `protobuf:"varint,1,opt,name=passed" json:"passed,omitempty"`
BackoffMs []int32 `protobuf:"varint,2,rep,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"`
}
func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} }
func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) }
func (*ReconnectInfo) ProtoMessage() {}
func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} }
func init() {
proto.RegisterType((*Payload)(nil), "grpc.testing.Payload")
proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus")
proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest")
proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse")
proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest")
proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse")
proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters")
proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest")
proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse")
proto.RegisterType((*ReconnectParams)(nil), "grpc.testing.ReconnectParams")
proto.RegisterType((*ReconnectInfo)(nil), "grpc.testing.ReconnectInfo")
proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value)
proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value)
}
func init() { proto.RegisterFile("messages.proto", fileDescriptor1) }
var fileDescriptor1 = []byte{
// 645 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
0x10, 0x25, 0xdf, 0xe9, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x51, 0x99, 0x4b, 0x55, 0x89,
0x20, 0x15, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9b, 0x84, 0x75, 0x7b, 0xe1, 0x62,
0x6d, 0x9c, 0x4d, 0x1a, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x28, 0x07, 0xee, 0xfc, 0x60, 0xee, 0xec,
0xae, 0xbd, 0x8e, 0xd3, 0xf6, 0xd0, 0xc2, 0x85, 0xdb, 0xce, 0xcc, 0x9b, 0x97, 0x79, 0x33, 0xcf,
0x0a, 0xb4, 0x7c, 0xca, 0x39, 0x99, 0x51, 0xde, 0x09, 0x23, 0x26, 0x18, 0x6a, 0xce, 0xa2, 0xd0,
0xeb, 0x08, 0xca, 0xc5, 0x3c, 0x98, 0xd9, 0xa7, 0x50, 0x1b, 0x91, 0xab, 0x05, 0x23, 0x13, 0xf4,
0x02, 0xca, 0xe2, 0x2a, 0xa4, 0x56, 0x61, 0xb7, 0xb0, 0xd7, 0x3a, 0xd8, 0xea, 0xe4, 0x71, 0x9d,
0x14, 0x74, 0x2e, 0x01, 0x58, 0xc3, 0x10, 0x82, 0xf2, 0x98, 0x4d, 0xae, 0xac, 0xa2, 0x84, 0x37,
0xb1, 0x7e, 0xdb, 0x6f, 0x01, 0x7a, 0xde, 0x25, 0x73, 0x04, 0x11, 0x31, 0x57, 0x08, 0x8f, 0x4d,
0x12, 0xc2, 0x0a, 0xd6, 0x6f, 0x64, 0x41, 0x2d, 0x9d, 0x47, 0x37, 0xae, 0x61, 0x13, 0xda, 0xbf,
0x4a, 0xb0, 0xee, 0xcc, 0xfd, 0x70, 0x41, 0x31, 0xfd, 0x1a, 0xcb, 0x9f, 0x45, 0xef, 0x60, 0x3d,
0xa2, 0x3c, 0x64, 0x01, 0xa7, 0xee, 0xdd, 0x26, 0x6b, 0x1a, 0xbc, 0x8a, 0xd0, 0xf3, 0x5c, 0x3f,
0x9f, 0xff, 0x48, 0x7e, 0xb1, 0xb2, 0x04, 0x39, 0x32, 0x87, 0x5e, 0x42, 0x2d, 0x4c, 0x18, 0xac,
0x92, 0x2c, 0x37, 0x0e, 0x36, 0x6f, 0xa5, 0xc7, 0x06, 0xa5, 0x58, 0xa7, 0xf3, 0xc5, 0xc2, 0x8d,
0x39, 0x8d, 0x02, 0xe2, 0x53, 0xab, 0x2c, 0xdb, 0xea, 0xb8, 0xa9, 0x92, 0x17, 0x69, 0x0e, 0xed,
0x41, 0x5b, 0x83, 0x18, 0x89, 0xc5, 0xa5, 0xcb, 0x3d, 0x26, 0xa7, 0xaf, 0x68, 0x5c, 0x4b, 0xe5,
0x87, 0x2a, 0xed, 0xa8, 0x2c, 0x1a, 0xc1, 0xa3, 0x6c, 0x48, 0x8f, 0xf9, 0xa1, 0x0c, 0xf8, 0x9c,
0x05, 0x56, 0x55, 0x6b, 0xdd, 0x59, 0x1d, 0xe6, 0x68, 0x09, 0xd0, 0x7a, 0x1f, 0x9a, 0xd6, 0x5c,
0x01, 0x75, 0x61, 0x63, 0x29, 0x5b, 0x5f, 0xc2, 0xaa, 0x69, 0x65, 0xd6, 0x2a, 0xd9, 0xf2, 0x52,
0xb8, 0x95, 0xad, 0x44, 0xc7, 0xf6, 0x4f, 0x68, 0x99, 0x53, 0x24, 0xf9, 0xfc, 0x9a, 0x0a, 0x77,
0x5a, 0xd3, 0x36, 0xd4, 0xb3, 0x0d, 0x25, 0x97, 0xce, 0x62, 0xf4, 0x0c, 0x1a, 0xf9, 0xc5, 0x94,
0x74, 0x19, 0x58, 0xb6, 0x14, 0xe9, 0xca, 0x2d, 0x47, 0x44, 0x94, 0xf8, 0x92, 0xba, 0x1f, 0x84,
0xb1, 0x38, 0x22, 0x8b, 0x85, 0xb1, 0xc5, 0x7d, 0x47, 0xb1, 0xcf, 0x61, 0xfb, 0x36, 0xb6, 0x54,
0xd9, 0x6b, 0x78, 0x42, 0x66, 0xb3, 0x88, 0xce, 0x88, 0xa0, 0x13, 0x37, 0xed, 0x49, 0xfc, 0x92,
0x18, 0x77, 0x73, 0x59, 0x4e, 0xa9, 0x95, 0x71, 0xec, 0x3e, 0x20, 0xc3, 0x31, 0x22, 0x91, 0x94,
0x25, 0x68, 0xa4, 0x3d, 0x9f, 0x6b, 0xd5, 0x6f, 0x25, 0x77, 0x1e, 0xc8, 0xea, 0x37, 0xa2, 0x5c,
0x93, 0xba, 0x10, 0x4c, 0xea, 0x82, 0xdb, 0xbf, 0x8b, 0xb9, 0x09, 0x87, 0xb1, 0xb8, 0x26, 0xf8,
0x5f, 0xbf, 0x83, 0x4f, 0x90, 0xf9, 0x44, 0xea, 0x33, 0xa3, 0xca, 0x39, 0x4a, 0x72, 0x79, 0xbb,
0xab, 0x2c, 0x37, 0x25, 0x61, 0x14, 0xdd, 0x94, 0x79, 0xef, 0xaf, 0xe6, 0xbf, 0xb4, 0xf9, 0x00,
0x9e, 0xde, 0xba, 0xf6, 0xbf, 0xf4, 0xbc, 0xfd, 0x11, 0x36, 0x30, 0xf5, 0x58, 0x10, 0x50, 0x4f,
0xe8, 0x65, 0x71, 0xf4, 0x06, 0x2c, 0x9f, 0x7c, 0x77, 0x23, 0x93, 0x76, 0xc7, 0xc4, 0xfb, 0xc2,
0xa6, 0x53, 0xd7, 0xe7, 0xc6, 0x5e, 0xb2, 0x9e, 0x75, 0x1d, 0x26, 0xd5, 0x33, 0x6e, 0x9f, 0xc0,
0x7a, 0x96, 0xed, 0x07, 0x53, 0x86, 0x1e, 0x43, 0x35, 0x24, 0x9c, 0xd3, 0x64, 0x98, 0x3a, 0x4e,
0x23, 0xb4, 0x03, 0x90, 0xe3, 0x54, 0x47, 0xad, 0xe0, 0xb5, 0xb1, 0xe1, 0xd9, 0x7f, 0x0f, 0x8d,
0x9c, 0x33, 0x50, 0x1b, 0x9a, 0x47, 0xc3, 0xb3, 0x11, 0xee, 0x39, 0x4e, 0xf7, 0xf0, 0xb4, 0xd7,
0x7e, 0x20, 0x1d, 0xdb, 0xba, 0x18, 0xac, 0xe4, 0x0a, 0x08, 0xa0, 0x8a, 0xbb, 0x83, 0xe3, 0xe1,
0x59, 0xbb, 0xb8, 0x7f, 0x00, 0x1b, 0xd7, 0xee, 0x81, 0xea, 0x50, 0x1e, 0x0c, 0x07, 0xaa, 0x59,
0xbe, 0x3e, 0x7c, 0xee, 0x8f, 0x64, 0x4b, 0x03, 0x6a, 0xc7, 0xbd, 0x93, 0xd3, 0xee, 0x79, 0xaf,
0x5d, 0x1c, 0x57, 0xf5, 0x5f, 0xcd, 0xab, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce,
0x1e, 0x7c, 0x06, 0x00, 0x00,
}

View file

@ -0,0 +1,172 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Message definitions to be used by integration test service definitions.
syntax = "proto3";
package grpc.testing;
// The type of payload that should be returned.
enum PayloadType {
// Compressable text format.
COMPRESSABLE = 0;
// Uncompressable binary format.
UNCOMPRESSABLE = 1;
// Randomly chosen from all other formats defined in this enum.
RANDOM = 2;
}
// Compression algorithms
enum CompressionType {
// No compression
NONE = 0;
GZIP = 1;
DEFLATE = 2;
}
// A block of data, to simply increase gRPC message size.
message Payload {
// The type of data in body.
PayloadType type = 1;
// Primary contents of payload.
bytes body = 2;
}
// A protobuf representation for grpc status. This is used by test
// clients to specify a status that the server should attempt to return.
message EchoStatus {
int32 code = 1;
string message = 2;
}
// Unary request.
message SimpleRequest {
// Desired payload type in the response from the server.
// If response_type is RANDOM, server randomly chooses one from other formats.
PayloadType response_type = 1;
// Desired payload size in the response from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
int32 response_size = 2;
// Optional input payload sent along with the request.
Payload payload = 3;
// Whether SimpleResponse should include username.
bool fill_username = 4;
// Whether SimpleResponse should include OAuth scope.
bool fill_oauth_scope = 5;
// Compression algorithm to be used by the server for the response (stream)
CompressionType response_compression = 6;
// Whether server should return a given status
EchoStatus response_status = 7;
}
// Unary response, as configured by the request.
message SimpleResponse {
// Payload to increase message size.
Payload payload = 1;
// The user the request came from, for verifying authentication was
// successful when the client expected it.
string username = 2;
// OAuth scope.
string oauth_scope = 3;
}
// Client-streaming request.
message StreamingInputCallRequest {
// Optional input payload sent along with the request.
Payload payload = 1;
// Not expecting any payload from the response.
}
// Client-streaming response.
message StreamingInputCallResponse {
// Aggregated size of payloads received from the client.
int32 aggregated_payload_size = 1;
}
// Configuration for a particular response.
message ResponseParameters {
// Desired payload sizes in responses from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
int32 size = 1;
// Desired interval between consecutive responses in the response stream in
// microseconds.
int32 interval_us = 2;
}
// Server-streaming request.
message StreamingOutputCallRequest {
// Desired payload type in the response from the server.
// If response_type is RANDOM, the payload from each response in the stream
// might be of different types. This is to simulate a mixed type of payload
// stream.
PayloadType response_type = 1;
// Configuration for each expected response message.
repeated ResponseParameters response_parameters = 2;
// Optional input payload sent along with the request.
Payload payload = 3;
// Compression algorithm to be used by the server for the response (stream)
CompressionType response_compression = 6;
// Whether server should return a given status
EchoStatus response_status = 7;
}
// Server-streaming response, as configured by the request and parameters.
message StreamingOutputCallResponse {
// Payload to increase response size.
Payload payload = 1;
}
// For reconnect interop test only.
// Client tells server what reconnection parameters it used.
message ReconnectParams {
int32 max_reconnect_backoff_ms = 1;
}
// For reconnect interop test only.
// Server tells client whether its reconnects are following the spec and the
// reconnect backoffs it saw.
message ReconnectInfo {
bool passed = 1;
repeated int32 backoff_ms = 2;
}
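
To make the request shape concrete, a small hedged sketch of building a COMPRESSABLE SimpleRequest with an attached Payload, using the generated types from messages.pb.go above (the package import path is assumed, as in the earlier sketch).

package main

import (
	"fmt"

	testpb "google.golang.org/grpc/benchmark/grpc_testing" // assumed vendored path
)

func main() {
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 1024, // server replies with a 1 KiB (pre-compression) payload
		Payload: &testpb.Payload{
			Type: testpb.PayloadType_COMPRESSABLE,
			Body: make([]byte, 4096),
		},
		FillUsername: true,
	}
	// The generated String() uses proto.CompactTextString, handy for debugging.
	fmt.Println(req.String())
}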

View file

@ -0,0 +1,223 @@
// Code generated by protoc-gen-go.
// source: payloads.proto
// DO NOT EDIT!
package grpc_testing
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type ByteBufferParams struct {
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"`
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"`
}
func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} }
func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) }
func (*ByteBufferParams) ProtoMessage() {}
func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
type SimpleProtoParams struct {
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"`
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"`
}
func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} }
func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) }
func (*SimpleProtoParams) ProtoMessage() {}
func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
type ComplexProtoParams struct {
}
func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} }
func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) }
func (*ComplexProtoParams) ProtoMessage() {}
func (*ComplexProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
type PayloadConfig struct {
// Types that are valid to be assigned to Payload:
// *PayloadConfig_BytebufParams
// *PayloadConfig_SimpleParams
// *PayloadConfig_ComplexParams
Payload isPayloadConfig_Payload `protobuf_oneof:"payload"`
}
func (m *PayloadConfig) Reset() { *m = PayloadConfig{} }
func (m *PayloadConfig) String() string { return proto.CompactTextString(m) }
func (*PayloadConfig) ProtoMessage() {}
func (*PayloadConfig) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
type isPayloadConfig_Payload interface {
isPayloadConfig_Payload()
}
type PayloadConfig_BytebufParams struct {
BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,oneof"`
}
type PayloadConfig_SimpleParams struct {
SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,oneof"`
}
type PayloadConfig_ComplexParams struct {
ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,oneof"`
}
func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {}
func (*PayloadConfig_SimpleParams) isPayloadConfig_Payload() {}
func (*PayloadConfig_ComplexParams) isPayloadConfig_Payload() {}
func (m *PayloadConfig) GetPayload() isPayloadConfig_Payload {
if m != nil {
return m.Payload
}
return nil
}
func (m *PayloadConfig) GetBytebufParams() *ByteBufferParams {
if x, ok := m.GetPayload().(*PayloadConfig_BytebufParams); ok {
return x.BytebufParams
}
return nil
}
func (m *PayloadConfig) GetSimpleParams() *SimpleProtoParams {
if x, ok := m.GetPayload().(*PayloadConfig_SimpleParams); ok {
return x.SimpleParams
}
return nil
}
func (m *PayloadConfig) GetComplexParams() *ComplexProtoParams {
if x, ok := m.GetPayload().(*PayloadConfig_ComplexParams); ok {
return x.ComplexParams
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*PayloadConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _PayloadConfig_OneofMarshaler, _PayloadConfig_OneofUnmarshaler, _PayloadConfig_OneofSizer, []interface{}{
(*PayloadConfig_BytebufParams)(nil),
(*PayloadConfig_SimpleParams)(nil),
(*PayloadConfig_ComplexParams)(nil),
}
}
func _PayloadConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*PayloadConfig)
// payload
switch x := m.Payload.(type) {
case *PayloadConfig_BytebufParams:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytebufParams); err != nil {
return err
}
case *PayloadConfig_SimpleParams:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.SimpleParams); err != nil {
return err
}
case *PayloadConfig_ComplexParams:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ComplexParams); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("PayloadConfig.Payload has unexpected type %T", x)
}
return nil
}
func _PayloadConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*PayloadConfig)
switch tag {
case 1: // payload.bytebuf_params
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ByteBufferParams)
err := b.DecodeMessage(msg)
m.Payload = &PayloadConfig_BytebufParams{msg}
return true, err
case 2: // payload.simple_params
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(SimpleProtoParams)
err := b.DecodeMessage(msg)
m.Payload = &PayloadConfig_SimpleParams{msg}
return true, err
case 3: // payload.complex_params
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ComplexProtoParams)
err := b.DecodeMessage(msg)
m.Payload = &PayloadConfig_ComplexParams{msg}
return true, err
default:
return false, nil
}
}
func _PayloadConfig_OneofSizer(msg proto.Message) (n int) {
m := msg.(*PayloadConfig)
// payload
switch x := m.Payload.(type) {
case *PayloadConfig_BytebufParams:
s := proto.Size(x.BytebufParams)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *PayloadConfig_SimpleParams:
s := proto.Size(x.SimpleParams)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *PayloadConfig_ComplexParams:
s := proto.Size(x.ComplexParams)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
func init() {
proto.RegisterType((*ByteBufferParams)(nil), "grpc.testing.ByteBufferParams")
proto.RegisterType((*SimpleProtoParams)(nil), "grpc.testing.SimpleProtoParams")
proto.RegisterType((*ComplexProtoParams)(nil), "grpc.testing.ComplexProtoParams")
proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig")
}
func init() { proto.RegisterFile("payloads.proto", fileDescriptor2) }
var fileDescriptor2 = []byte{
// 250 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc,
0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48,
0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49,
0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2,
0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62,
0x07, 0xf2, 0x83, 0x81, 0x5c, 0x21, 0x69, 0x2e, 0xce, 0xa2, 0xd4, 0xe2, 0x02, 0x88, 0x1c, 0x13,
0x58, 0x8e, 0x03, 0x24, 0x00, 0x92, 0x54, 0xf2, 0xe6, 0x12, 0x0c, 0xce, 0xcc, 0x2d, 0xc8, 0x49,
0x0d, 0x00, 0x59, 0x44, 0xa1, 0x61, 0x22, 0x5c, 0x42, 0xce, 0xf9, 0x20, 0xc3, 0x2a, 0x90, 0x4c,
0x53, 0xfa, 0xc6, 0xc8, 0xc5, 0x1b, 0x00, 0xf1, 0x8f, 0x73, 0x7e, 0x5e, 0x5a, 0x66, 0xba, 0x90,
0x3b, 0x17, 0x5f, 0x12, 0xd0, 0x03, 0x49, 0xa5, 0x69, 0xf1, 0x05, 0x60, 0x35, 0x60, 0x5b, 0xb8,
0x8d, 0xe4, 0xf4, 0x90, 0xfd, 0xa9, 0x87, 0xee, 0x49, 0x0f, 0x86, 0x20, 0x5e, 0xa8, 0x3e, 0xa8,
0x43, 0xdd, 0xb8, 0x78, 0x8b, 0xc1, 0xae, 0x87, 0x99, 0xc3, 0x04, 0x36, 0x47, 0x1e, 0xd5, 0x1c,
0x0c, 0x0f, 0x02, 0x0d, 0xe2, 0x81, 0xe8, 0x83, 0x9a, 0xe3, 0xc9, 0xc5, 0x97, 0x0c, 0x71, 0x38,
0xcc, 0x20, 0x66, 0xb0, 0x41, 0x0a, 0xa8, 0x06, 0x61, 0x7a, 0x0e, 0xe4, 0x24, 0xa8, 0x4e, 0x88,
0x80, 0x13, 0x27, 0x17, 0x3b, 0x34, 0xf2, 0x92, 0xd8, 0xc0, 0x91, 0x67, 0x0c, 0x08, 0x00, 0x00,
0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00,
}
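
The PayloadConfig oneof above is the part most likely to trip up callers: the active variant is selected by wrapping it in the generated wrapper type, and the typed getters return nil when a different variant is set. A brief sketch (import path assumed as before):

package main

import (
	"fmt"

	testpb "google.golang.org/grpc/benchmark/grpc_testing" // assumed vendored path
)

func main() {
	// Select the simple-proto payload variant via its oneof wrapper.
	cfg := &testpb.PayloadConfig{
		Payload: &testpb.PayloadConfig_SimpleParams{
			SimpleParams: &testpb.SimpleProtoParams{ReqSize: 128, RespSize: 1024},
		},
	}

	// Readers can type-switch on cfg.GetPayload() or use the typed getters.
	if p := cfg.GetSimpleParams(); p != nil {
		fmt.Println(p.ReqSize, p.RespSize)
	}
}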

View file

@ -0,0 +1,55 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package grpc.testing;
message ByteBufferParams {
int32 req_size = 1;
int32 resp_size = 2;
}
message SimpleProtoParams {
int32 req_size = 1;
int32 resp_size = 2;
}
message ComplexProtoParams {
// TODO (vpai): Fill this in once the details of complex, representative
// protos are decided
}
message PayloadConfig {
oneof payload {
ByteBufferParams bytebuf_params = 1;
SimpleProtoParams simple_params = 2;
ComplexProtoParams complex_params = 3;
}
}

View file

@ -0,0 +1,443 @@
// Code generated by protoc-gen-go.
// source: services.proto
// DO NOT EDIT!
package grpc_testing
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion3
// Client API for BenchmarkService service
type BenchmarkServiceClient interface {
// One request followed by one response.
// The server returns the client payload as-is.
UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
// One request followed by one response.
// The server returns the client payload as-is.
StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error)
}
type benchmarkServiceClient struct {
cc *grpc.ClientConn
}
func NewBenchmarkServiceClient(cc *grpc.ClientConn) BenchmarkServiceClient {
return &benchmarkServiceClient{cc}
}
func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {
out := new(SimpleResponse)
err := grpc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) {
stream, err := grpc.NewClientStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], c.cc, "/grpc.testing.BenchmarkService/StreamingCall", opts...)
if err != nil {
return nil, err
}
x := &benchmarkServiceStreamingCallClient{stream}
return x, nil
}
type BenchmarkService_StreamingCallClient interface {
Send(*SimpleRequest) error
Recv() (*SimpleResponse, error)
grpc.ClientStream
}
type benchmarkServiceStreamingCallClient struct {
grpc.ClientStream
}
func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) {
m := new(SimpleResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Server API for BenchmarkService service
type BenchmarkServiceServer interface {
// One request followed by one response.
// The server returns the client payload as-is.
UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error)
// One request followed by one response.
// The server returns the client payload as-is.
StreamingCall(BenchmarkService_StreamingCallServer) error
}
func RegisterBenchmarkServiceServer(s *grpc.Server, srv BenchmarkServiceServer) {
s.RegisterService(&_BenchmarkService_serviceDesc, srv)
}
func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SimpleRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BenchmarkServiceServer).UnaryCall(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpc.testing.BenchmarkService/UnaryCall",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream})
}
type BenchmarkService_StreamingCallServer interface {
Send(*SimpleResponse) error
Recv() (*SimpleRequest, error)
grpc.ServerStream
}
type benchmarkServiceStreamingCallServer struct {
grpc.ServerStream
}
func (x *benchmarkServiceStreamingCallServer) Send(m *SimpleResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) {
m := new(SimpleRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _BenchmarkService_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpc.testing.BenchmarkService",
HandlerType: (*BenchmarkServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "UnaryCall",
Handler: _BenchmarkService_UnaryCall_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "StreamingCall",
Handler: _BenchmarkService_StreamingCall_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: fileDescriptor3,
}
// Client API for WorkerService service
type WorkerServiceClient interface {
// Start server with specified workload.
// First request sent specifies the ServerConfig followed by ServerStatus
// response. After that, a "Mark" can be sent anytime to request the latest
// stats. Closing the stream will initiate shutdown of the test server
// and once the shutdown has finished, the OK status is sent to terminate
// this RPC.
RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error)
// Start client with specified workload.
// First request sent specifies the ClientConfig followed by ClientStatus
// response. After that, a "Mark" can be sent anytime to request the latest
// stats. Closing the stream will initiate shutdown of the test client
// and once the shutdown has finished, the OK status is sent to terminate
// this RPC.
RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error)
// Just return the core count - unary call
CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error)
// Quit this worker
QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error)
}
type workerServiceClient struct {
cc *grpc.ClientConn
}
func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient {
return &workerServiceClient{cc}
}
func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) {
stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[0], c.cc, "/grpc.testing.WorkerService/RunServer", opts...)
if err != nil {
return nil, err
}
x := &workerServiceRunServerClient{stream}
return x, nil
}
type WorkerService_RunServerClient interface {
Send(*ServerArgs) error
Recv() (*ServerStatus, error)
grpc.ClientStream
}
type workerServiceRunServerClient struct {
grpc.ClientStream
}
func (x *workerServiceRunServerClient) Send(m *ServerArgs) error {
return x.ClientStream.SendMsg(m)
}
func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) {
m := new(ServerStatus)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) {
stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[1], c.cc, "/grpc.testing.WorkerService/RunClient", opts...)
if err != nil {
return nil, err
}
x := &workerServiceRunClientClient{stream}
return x, nil
}
type WorkerService_RunClientClient interface {
Send(*ClientArgs) error
Recv() (*ClientStatus, error)
grpc.ClientStream
}
type workerServiceRunClientClient struct {
grpc.ClientStream
}
func (x *workerServiceRunClientClient) Send(m *ClientArgs) error {
return x.ClientStream.SendMsg(m)
}
func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) {
m := new(ClientStatus)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) {
out := new(CoreResponse)
err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) {
out := new(Void)
err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for WorkerService service
type WorkerServiceServer interface {
// Start server with specified workload.
// First request sent specifies the ServerConfig followed by ServerStatus
// response. After that, a "Mark" can be sent anytime to request the latest
// stats. Closing the stream will initiate shutdown of the test server
// and once the shutdown has finished, the OK status is sent to terminate
// this RPC.
RunServer(WorkerService_RunServerServer) error
// Start client with specified workload.
// First request sent specifies the ClientConfig followed by ClientStatus
// response. After that, a "Mark" can be sent anytime to request the latest
// stats. Closing the stream will initiate shutdown of the test client
// and once the shutdown has finished, the OK status is sent to terminate
// this RPC.
RunClient(WorkerService_RunClientServer) error
// Just return the core count - unary call
CoreCount(context.Context, *CoreRequest) (*CoreResponse, error)
// Quit this worker
QuitWorker(context.Context, *Void) (*Void, error)
}
func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer) {
s.RegisterService(&_WorkerService_serviceDesc, srv)
}
func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream})
}
type WorkerService_RunServerServer interface {
Send(*ServerStatus) error
Recv() (*ServerArgs, error)
grpc.ServerStream
}
type workerServiceRunServerServer struct {
grpc.ServerStream
}
func (x *workerServiceRunServerServer) Send(m *ServerStatus) error {
return x.ServerStream.SendMsg(m)
}
func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) {
m := new(ServerArgs)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream})
}
type WorkerService_RunClientServer interface {
Send(*ClientStatus) error
Recv() (*ClientArgs, error)
grpc.ServerStream
}
type workerServiceRunClientServer struct {
grpc.ServerStream
}
func (x *workerServiceRunClientServer) Send(m *ClientStatus) error {
return x.ServerStream.SendMsg(m)
}
func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) {
m := new(ClientArgs)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CoreRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(WorkerServiceServer).CoreCount(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpc.testing.WorkerService/CoreCount",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest))
}
return interceptor(ctx, in, info, handler)
}
func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Void)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(WorkerServiceServer).QuitWorker(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpc.testing.WorkerService/QuitWorker",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void))
}
return interceptor(ctx, in, info, handler)
}
var _WorkerService_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpc.testing.WorkerService",
HandlerType: (*WorkerServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CoreCount",
Handler: _WorkerService_CoreCount_Handler,
},
{
MethodName: "QuitWorker",
Handler: _WorkerService_QuitWorker_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "RunServer",
Handler: _WorkerService_RunServer_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "RunClient",
Handler: _WorkerService_RunClient_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: fileDescriptor3,
}
func init() { proto.RegisterFile("services.proto", fileDescriptor3) }
var fileDescriptor3 = []byte{
// 254 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30,
0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 0x53, 0x91, 0xd5, 0x17,
0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0x4d, 0xea, 0xcc, 0x44, 0xf0,
0x49, 0x7c, 0x07, 0x9f, 0xd2, 0xee, 0x66, 0x0b, 0xb5, 0xe4, 0xb6, 0xc7, 0xf9, 0xbf, 0xe1, 0x23,
0x7f, 0x46, 0x2c, 0x08, 0xf0, 0xcb, 0x34, 0x40, 0x65, 0x8f, 0x9e, 0xbd, 0x3c, 0x69, 0xb1, 0x6f,
0x4a, 0x06, 0x62, 0xe3, 0x5a, 0xb5, 0xe8, 0x80, 0x48, 0xb7, 0x23, 0x55, 0x45, 0xe3, 0x1d, 0xa3,
0xb7, 0x71, 0x5c, 0xfe, 0x66, 0xe2, 0x74, 0x05, 0xae, 0xf9, 0xe8, 0x34, 0x6e, 0xea, 0x28, 0x92,
0x0f, 0x22, 0x7f, 0x76, 0x1a, 0xbf, 0x2b, 0x6d, 0xad, 0xbc, 0x28, 0xa7, 0xbe, 0xb2, 0x36, 0x5d,
0x6f, 0x61, 0x0d, 0x9f, 0x61, 0x08, 0xd4, 0x65, 0x1a, 0x52, 0xef, 0x1d, 0x81, 0x7c, 0x14, 0x45,
0xcd, 0x08, 0xba, 0x1b, 0xd8, 0x81, 0xae, 0xab, 0xec, 0x3a, 0x5b, 0xfe, 0x1c, 0x89, 0xe2, 0xd5,
0xe3, 0x06, 0x70, 0x7c, 0xe9, 0xbd, 0xc8, 0xd7, 0xc1, 0x6d, 0x27, 0x40, 0x79, 0x36, 0x13, 0xec,
0xd2, 0x3b, 0x6c, 0x49, 0xa9, 0x14, 0xa9, 0x59, 0x73, 0xa0, 0xad, 0x78, 0xaf, 0xa9, 0xac, 0x01,
0xc7, 0x73, 0x4d, 0x4c, 0x53, 0x9a, 0x48, 0x26, 0x9a, 0x95, 0xc8, 0x2b, 0x8f, 0x50, 0xf9, 0x30,
0x68, 0xce, 0x67, 0xcb, 0x03, 0x18, 0x9b, 0xaa, 0x14, 0xda, 0xff, 0xd9, 0xad, 0x10, 0x4f, 0xc1,
0x70, 0xac, 0x29, 0xe5, 0xff, 0xcd, 0x17, 0x6f, 0xde, 0x55, 0x22, 0x7b, 0x3b, 0xde, 0x5d, 0xf3,
0xe6, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00,
}
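
A hedged sketch of wiring up the generated server side: an echo implementation of BenchmarkServiceServer ("returns the client payload as-is", per the comments) registered on a gRPC server. The generated package's import path is assumed as in the earlier sketches.

package main

import (
	"io"
	"log"
	"net"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	testpb "google.golang.org/grpc/benchmark/grpc_testing" // assumed vendored path
)

// echoServer returns the client payload as-is.
type echoServer struct{}

func (echoServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	return &testpb.SimpleResponse{Payload: in.Payload}, nil
}

func (echoServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error {
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err := stream.Send(&testpb.SimpleResponse{Payload: in.Payload}); err != nil {
			return err
		}
	}
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:10000")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	testpb.RegisterBenchmarkServiceServer(s, echoServer{})
	log.Fatal(s.Serve(lis))
}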

View file

@ -0,0 +1,71 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
syntax = "proto3";
import "messages.proto";
import "control.proto";
package grpc.testing;
service BenchmarkService {
// One request followed by one response.
// The server returns the client payload as-is.
rpc UnaryCall(SimpleRequest) returns (SimpleResponse);
// One request followed by one response.
// The server returns the client payload as-is.
rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse);
}
service WorkerService {
// Start server with specified workload.
// First request sent specifies the ServerConfig followed by ServerStatus
// response. After that, a "Mark" can be sent anytime to request the latest
// stats. Closing the stream will initiate shutdown of the test server
// and once the shutdown has finished, the OK status is sent to terminate
// this RPC.
rpc RunServer(stream ServerArgs) returns (stream ServerStatus);
// Start client with specified workload.
// First request sent specifies the ClientConfig followed by ClientStatus
// response. After that, a "Mark" can be sent anytime to request the latest
// stats. Closing the stream will initiate shutdown of the test client
// and once the shutdown has finished, the OK status is sent to terminate
// this RPC.
rpc RunClient(stream ClientArgs) returns (stream ClientStatus);
// Just return the core count - unary call
rpc CoreCount(CoreRequest) returns (CoreResponse);
// Quit this worker
rpc QuitWorker(Void) returns (Void);
}
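
Below is a minimal driver-side sketch (not part of this change) of how the WorkerService above can be exercised from Go. It assumes the protoc-gen-go generated NewWorkerServiceClient constructor and message types; the worker address is illustrative (the worker's -driver_port flag defaults to 10000).

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

func main() {
	// Illustrative worker address.
	conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial worker: %v", err)
	}
	defer conn.Close()

	worker := testpb.NewWorkerServiceClient(conn) // assumed generated constructor

	// Unary call: ask the worker how many cores it can use.
	resp, err := worker.CoreCount(context.Background(), &testpb.CoreRequest{})
	if err != nil {
		log.Fatalf("CoreCount: %v", err)
	}
	log.Printf("worker reports %d cores", resp.Cores)

	// Ask the worker to exit once the run is finished.
	if _, err := worker.QuitWorker(context.Background(), &testpb.Void{}); err != nil {
		log.Fatalf("QuitWorker: %v", err)
	}
}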


@ -0,0 +1,111 @@
// Code generated by protoc-gen-go.
// source: stats.proto
// DO NOT EDIT!
package grpc_testing
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type ServerStats struct {
// wall clock time change in seconds since last reset
TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"`
// change in user time (in seconds) used by the server since last reset
TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser" json:"time_user,omitempty"`
// change in server time (in seconds) used by the server process and all
// threads since last reset
TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"`
}
func (m *ServerStats) Reset() { *m = ServerStats{} }
func (m *ServerStats) String() string { return proto.CompactTextString(m) }
func (*ServerStats) ProtoMessage() {}
func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
// Histogram params based on grpc/support/histogram.c
type HistogramParams struct {
Resolution float64 `protobuf:"fixed64,1,opt,name=resolution" json:"resolution,omitempty"`
MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible" json:"max_possible,omitempty"`
}
func (m *HistogramParams) Reset() { *m = HistogramParams{} }
func (m *HistogramParams) String() string { return proto.CompactTextString(m) }
func (*HistogramParams) ProtoMessage() {}
func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
// Histogram data based on grpc/support/histogram.c
type HistogramData struct {
Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"`
MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen" json:"min_seen,omitempty"`
MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen" json:"max_seen,omitempty"`
Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"`
SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares" json:"sum_of_squares,omitempty"`
Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"`
}
func (m *HistogramData) Reset() { *m = HistogramData{} }
func (m *HistogramData) String() string { return proto.CompactTextString(m) }
func (*HistogramData) ProtoMessage() {}
func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} }
type ClientStats struct {
// Latency histogram. Data points are in nanoseconds.
Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"`
// See ServerStats for details.
TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"`
TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser" json:"time_user,omitempty"`
TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"`
}
func (m *ClientStats) Reset() { *m = ClientStats{} }
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage() {}
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} }
func (m *ClientStats) GetLatencies() *HistogramData {
if m != nil {
return m.Latencies
}
return nil
}
func init() {
proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats")
proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams")
proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData")
proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats")
}
func init() { proto.RegisterFile("stats.proto", fileDescriptor4) }
var fileDescriptor4 = []byte{
// 342 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x4f, 0xe3, 0x30,
0x10, 0xc5, 0x95, 0xa6, 0xed, 0xb6, 0x93, 0x76, 0x77, 0x65, 0xad, 0x56, 0x41, 0x95, 0xf8, 0x13,
0x71, 0xe8, 0x29, 0x07, 0x38, 0x71, 0x06, 0x24, 0x6e, 0x54, 0x0d, 0x9c, 0x23, 0x37, 0x4c, 0x2b,
0x8b, 0xc4, 0x0e, 0x99, 0x09, 0x2a, 0x1f, 0x09, 0xf1, 0x25, 0x71, 0x9c, 0x08, 0x0a, 0x48, 0x70,
0x49, 0xf2, 0x7e, 0x6f, 0x34, 0xe3, 0xc9, 0x33, 0x04, 0xc4, 0x92, 0x29, 0x2e, 0x2b, 0xc3, 0x46,
0x4c, 0x36, 0x55, 0x99, 0xc5, 0x8c, 0xc4, 0x4a, 0x6f, 0x22, 0x0d, 0x41, 0x82, 0xd5, 0x23, 0x56,
0x49, 0x53, 0x22, 0x8e, 0x60, 0xc2, 0xaa, 0xc0, 0x14, 0x73, 0x59, 0x12, 0xde, 0x85, 0xde, 0xa1,
0x37, 0xf7, 0x96, 0x41, 0xc3, 0x2e, 0x5b, 0x24, 0x66, 0x30, 0x76, 0x25, 0x35, 0x61, 0x15, 0xf6,
0x9c, 0x3f, 0x6a, 0xc0, 0xad, 0xd5, 0xe2, 0x00, 0x5c, 0x6d, 0x4a, 0x4f, 0xc4, 0x58, 0x84, 0xbe,
0xb3, 0xa1, 0x41, 0x89, 0x23, 0xd1, 0x0d, 0xfc, 0xb9, 0x52, 0xc4, 0x66, 0x53, 0xc9, 0x62, 0x21,
0xed, 0x83, 0xc4, 0x3e, 0x40, 0x85, 0x64, 0xf2, 0x9a, 0x95, 0xd1, 0xdd, 0xc4, 0x1d, 0xd2, 0x9c,
0xa9, 0x90, 0xdb, 0xb4, 0x34, 0x44, 0x6a, 0x95, 0x63, 0x37, 0x33, 0xb0, 0x6c, 0xd1, 0xa1, 0xe8,
0xc5, 0x83, 0xe9, 0x5b, 0xdb, 0x0b, 0xc9, 0x52, 0xfc, 0x87, 0xe1, 0xaa, 0xce, 0xee, 0x91, 0x6d,
0x43, 0x7f, 0x3e, 0x5d, 0x76, 0x4a, 0xec, 0xc1, 0xa8, 0x50, 0x3a, 0x25, 0x44, 0xdd, 0x35, 0xfa,
0x65, 0x75, 0x62, 0xa5, 0xb3, 0xec, 0x1c, 0x67, 0xf9, 0x9d, 0x25, 0xb7, 0xce, 0xfa, 0x0b, 0x3e,
0xd5, 0x45, 0xd8, 0x77, 0xb4, 0xf9, 0x14, 0xc7, 0xf0, 0xdb, 0xbe, 0x52, 0xb3, 0x4e, 0xe9, 0xa1,
0x96, 0xf6, 0xb4, 0xe1, 0xc0, 0x99, 0x13, 0x4b, 0xaf, 0xd7, 0x49, 0xcb, 0xc4, 0x3f, 0x18, 0x64,
0xa6, 0xd6, 0x1c, 0x0e, 0x9d, 0xd9, 0x8a, 0xe8, 0xd9, 0x83, 0xe0, 0x3c, 0x57, 0xa8, 0xb9, 0xfd,
0xe9, 0x67, 0x30, 0xce, 0x25, 0xa3, 0xce, 0x94, 0x6d, 0xd3, 0xec, 0x1f, 0x9c, 0xcc, 0xe2, 0xdd,
0x94, 0xe2, 0x0f, 0xbb, 0x2d, 0xdf, 0xab, 0xbf, 0xe4, 0xd5, 0xfb, 0x21, 0x2f, 0xff, 0xfb, 0xbc,
0xfa, 0x9f, 0xf3, 0x5a, 0x0d, 0xdd, 0xa5, 0x39, 0x7d, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75,
0x34, 0x90, 0x43, 0x02, 0x00, 0x00,
}


@ -0,0 +1,70 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package grpc.testing;
message ServerStats {
// wall clock time change in seconds since last reset
double time_elapsed = 1;
// change in user time (in seconds) used by the server since last reset
double time_user = 2;
// change in server time (in seconds) used by the server process and all
// threads since last reset
double time_system = 3;
}
// Histogram params based on grpc/support/histogram.c
message HistogramParams {
double resolution = 1; // first bucket is [0, 1 + resolution)
double max_possible = 2; // use enough buckets to allow this value
}
// Histogram data based on grpc/support/histogram.c
message HistogramData {
repeated uint32 bucket = 1;
double min_seen = 2;
double max_seen = 3;
double sum = 4;
double sum_of_squares = 5;
double count = 6;
}
message ClientStats {
// Latency histogram. Data points are in nanoseconds.
HistogramData latencies = 1;
// See ServerStats for details.
double time_elapsed = 2;
double time_user = 3;
double time_system = 4;
}

vendor/google.golang.org/grpc/benchmark/server/main.go generated vendored Normal file

@ -0,0 +1,35 @@
package main
import (
"flag"
"math"
"net"
"net/http"
_ "net/http/pprof"
"time"
"google.golang.org/grpc/benchmark"
"google.golang.org/grpc/grpclog"
)
var (
duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server")
)
func main() {
flag.Parse()
go func() {
lis, err := net.Listen("tcp", ":0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
grpclog.Println("Server profiling address: ", lis.Addr().String())
if err := http.Serve(lis, nil); err != nil {
grpclog.Fatalf("Failed to serve: %v", err)
}
}()
addr, stopper := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces
grpclog.Println("Server Address: ", addr)
<-time.After(time.Duration(*duration) * time.Second)
stopper()
}
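
For completeness, a hedged sketch of the matching client side, using the same benchmark helpers vendored elsewhere in this change (benchmark.NewClientConn, benchmark.DoUnaryCall, and the generated BenchmarkService client). The target address and payload sizes are illustrative.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

func main() {
	// Address printed by the server above ("Server Address: ..."); illustrative value.
	conn := benchmark.NewClientConn("localhost:50051", grpc.WithInsecure())
	defer conn.Close()

	client := testpb.NewBenchmarkServiceClient(conn)

	// One unary echo call with 1 KB request and response payloads.
	if err := benchmark.DoUnaryCall(client, 1024, 1024); err != nil {
		log.Fatalf("DoUnaryCall: %v", err)
	}
	log.Println("unary call succeeded")
}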


@ -0,0 +1,15 @@
-----BEGIN CERTIFICATE-----
MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla
Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0
YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT
BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7
+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu
g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd
Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau
sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m
oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG
Dfcog5wrJytaQ6UA0wE=
-----END CERTIFICATE-----


@ -0,0 +1,16 @@
-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD
M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf
3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY
AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm
V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY
tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p
dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q
K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR
81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff
DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd
aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2
ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3
XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe
F98XJ7tIFfJq
-----END PRIVATE KEY-----


@ -0,0 +1,16 @@
-----BEGIN CERTIFICATE-----
MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET
MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ
dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx
MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV
BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50
ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco
LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg
zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd
9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw
CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy
em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G
CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6
hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh
y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8
-----END CERTIFICATE-----


@ -0,0 +1,198 @@
package stats
import (
"bytes"
"fmt"
"io"
"log"
"math"
"strconv"
"strings"
)
// Histogram accumulates values in the form of a histogram with
// exponentially increased bucket sizes.
type Histogram struct {
// Count is the total number of values added to the histogram.
Count int64
// Sum is the sum of all the values added to the histogram.
Sum int64
// SumOfSquares is the sum of squares of all values.
SumOfSquares int64
// Min is the minimum of all the values added to the histogram.
Min int64
// Max is the maximum of all the values added to the histogram.
Max int64
// Buckets contains all the buckets of the histogram.
Buckets []HistogramBucket
opts HistogramOptions
logBaseBucketSize float64
oneOverLogOnePlusGrowthFactor float64
}
// HistogramOptions contains the parameters that define the histogram's buckets.
// The first bucket of the created histogram (with index 0) contains [min, min+n)
// where n = BaseBucketSize, min = MinValue.
// Bucket i (i>=1) contains [min + n * m^(i-1), min + n * m^i), where m = 1+GrowthFactor.
// The type of the values is int64.
type HistogramOptions struct {
// NumBuckets is the number of buckets.
NumBuckets int
// GrowthFactor is the growth factor of the buckets. A value of 0.1
// indicates that bucket N+1 will be 10% larger than bucket N.
GrowthFactor float64
// BaseBucketSize is the size of the first bucket.
BaseBucketSize float64
// MinValue is the lower bound of the first bucket.
MinValue int64
}
// HistogramBucket represents one histogram bucket.
type HistogramBucket struct {
// LowBound is the lower bound of the bucket.
LowBound float64
// Count is the number of values in the bucket.
Count int64
}
// NewHistogram returns a pointer to a new Histogram object that was created
// with the provided options.
func NewHistogram(opts HistogramOptions) *Histogram {
if opts.NumBuckets == 0 {
opts.NumBuckets = 32
}
if opts.BaseBucketSize == 0.0 {
opts.BaseBucketSize = 1.0
}
h := Histogram{
Buckets: make([]HistogramBucket, opts.NumBuckets),
Min: math.MaxInt64,
Max: math.MinInt64,
opts: opts,
logBaseBucketSize: math.Log(opts.BaseBucketSize),
oneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor),
}
m := 1.0 + opts.GrowthFactor
delta := opts.BaseBucketSize
h.Buckets[0].LowBound = float64(opts.MinValue)
for i := 1; i < opts.NumBuckets; i++ {
h.Buckets[i].LowBound = float64(opts.MinValue) + delta
delta = delta * m
}
return &h
}
// Print writes textual output of the histogram values.
func (h *Histogram) Print(w io.Writer) {
avg := float64(h.Sum) / float64(h.Count)
fmt.Fprintf(w, "Count: %d Min: %d Max: %d Avg: %.2f\n", h.Count, h.Min, h.Max, avg)
fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60))
if h.Count <= 0 {
return
}
maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64))
if maxBucketDigitLen < 3 {
// For "inf".
maxBucketDigitLen = 3
}
maxCountDigitLen := len(strconv.FormatInt(h.Count, 10))
percentMulti := 100 / float64(h.Count)
accCount := int64(0)
for i, b := range h.Buckets {
fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound)
if i+1 < len(h.Buckets) {
fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound)
} else {
fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf")
}
accCount += b.Count
fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti)
const barScale = 0.1
barLength := int(float64(b.Count)*percentMulti*barScale + 0.5)
fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength))
}
}
// String returns the textual output of the histogram values as string.
func (h *Histogram) String() string {
var b bytes.Buffer
h.Print(&b)
return b.String()
}
// Clear resets all the content of histogram.
func (h *Histogram) Clear() {
h.Count = 0
h.Sum = 0
h.SumOfSquares = 0
h.Min = math.MaxInt64
h.Max = math.MinInt64
for _, v := range h.Buckets {
v.Count = 0
}
}
// Opts returns a copy of the options used to create the Histogram.
func (h *Histogram) Opts() HistogramOptions {
return h.opts
}
// Add adds a value to the histogram.
func (h *Histogram) Add(value int64) error {
bucket, err := h.findBucket(value)
if err != nil {
return err
}
h.Buckets[bucket].Count++
h.Count++
h.Sum += value
h.SumOfSquares += value * value
if value < h.Min {
h.Min = value
}
if value > h.Max {
h.Max = value
}
return nil
}
func (h *Histogram) findBucket(value int64) (int, error) {
delta := float64(value - h.opts.MinValue)
var b int
if delta >= h.opts.BaseBucketSize {
// b = log_{1+growthFactor} (delta / baseBucketSize) + 1
// = log(delta / baseBucketSize) / log(1+growthFactor) + 1
// = (log(delta) - log(baseBucketSize)) * (1 / log(1+growthFactor)) + 1
b = int((math.Log(delta)-h.logBaseBucketSize)*h.oneOverLogOnePlusGrowthFactor + 1)
}
if b >= len(h.Buckets) {
return 0, fmt.Errorf("no bucket for value: %d", value)
}
return b, nil
}
// Merge takes another histogram h2, and merges its content into h.
// The two histograms must be created by equivalent HistogramOptions.
func (h *Histogram) Merge(h2 *Histogram) {
if h.opts != h2.opts {
log.Fatalf("failed to merge histograms, created by inequivalent options")
}
h.Count += h2.Count
h.Sum += h2.Sum
h.SumOfSquares += h2.SumOfSquares
if h2.Min < h.Min {
h.Min = h2.Min
}
if h2.Max > h.Max {
h.Max = h2.Max
}
for i, b := range h2.Buckets {
h.Buckets[i].Count += b.Count
}
}
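
A short usage sketch of the Histogram above, with illustrative options chosen so the bucket layout from the HistogramOptions comment is easy to see.

package main

import (
	"os"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	// With MinValue 0, BaseBucketSize 10 and GrowthFactor 1 (m = 2), the
	// buckets are [0, 10), [10, 20), [20, 40), [40, 80), ... as described in
	// the HistogramOptions comment.
	h := stats.NewHistogram(stats.HistogramOptions{
		NumBuckets:     8,
		GrowthFactor:   1,
		BaseBucketSize: 10,
		MinValue:       0,
	})
	for _, v := range []int64{1, 5, 12, 37, 64, 300} {
		if err := h.Add(v); err != nil {
			panic(err)
		}
	}
	h.Print(os.Stdout)
}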

vendor/google.golang.org/grpc/benchmark/stats/stats.go generated vendored Normal file

@ -0,0 +1,116 @@
package stats
import (
"bytes"
"fmt"
"io"
"math"
"time"
)
// Stats is a simple helper for gathering additional statistics like histogram
// during benchmarks. This is not thread safe.
type Stats struct {
numBuckets int
unit time.Duration
min, max int64
histogram *Histogram
durations durationSlice
dirty bool
}
type durationSlice []time.Duration
// NewStats creates a new Stats instance. If numBuckets is not positive,
// the default value (16) will be used.
func NewStats(numBuckets int) *Stats {
if numBuckets <= 0 {
numBuckets = 16
}
return &Stats{
// Use one more bucket for the last unbounded bucket.
numBuckets: numBuckets + 1,
durations: make(durationSlice, 0, 100000),
}
}
// Add adds an elapsed time per operation to the stats.
func (stats *Stats) Add(d time.Duration) {
stats.durations = append(stats.durations, d)
stats.dirty = true
}
// Clear resets the stats, removing all values.
func (stats *Stats) Clear() {
stats.durations = stats.durations[:0]
stats.histogram = nil
stats.dirty = false
}
// maybeUpdate updates internal stat data if there was any newly added
// stats since this was updated.
func (stats *Stats) maybeUpdate() {
if !stats.dirty {
return
}
stats.min = math.MaxInt64
stats.max = 0
for _, d := range stats.durations {
if stats.min > int64(d) {
stats.min = int64(d)
}
if stats.max < int64(d) {
stats.max = int64(d)
}
}
// Use the largest unit that can represent the minimum time duration.
stats.unit = time.Nanosecond
for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} {
if stats.min <= int64(u) {
break
}
stats.unit = u
}
// Adjust the min/max according to the new unit.
stats.min /= int64(stats.unit)
stats.max /= int64(stats.unit)
numBuckets := stats.numBuckets
if n := int(stats.max - stats.min + 1); n < numBuckets {
numBuckets = n
}
stats.histogram = NewHistogram(HistogramOptions{
NumBuckets: numBuckets,
// max-min(lower bound of last bucket) = (1 + growthFactor)^(numBuckets-2) * baseBucketSize.
GrowthFactor: math.Pow(float64(stats.max-stats.min), 1/float64(numBuckets-2)) - 1,
BaseBucketSize: 1.0,
MinValue: stats.min})
for _, d := range stats.durations {
stats.histogram.Add(int64(d / stats.unit))
}
stats.dirty = false
}
// Print writes textual output of the Stats.
func (stats *Stats) Print(w io.Writer) {
stats.maybeUpdate()
if stats.histogram == nil {
fmt.Fprint(w, "Histogram (empty)\n")
} else {
fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:])
stats.histogram.Print(w)
}
}
// String returns the textual output of the Stats as string.
func (stats *Stats) String() string {
var b bytes.Buffer
stats.Print(&b)
return b.String()
}
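
A hedged example of how Stats is typically driven: record per-operation durations with Add and let Print pick the display unit and histogram parameters. The durations below are illustrative.

package main

import (
	"os"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	s := stats.NewStats(16) // 16 buckets plus one unbounded overflow bucket

	// Record a few per-operation latencies (illustrative values).
	for _, d := range []time.Duration{
		120 * time.Microsecond,
		340 * time.Microsecond,
		2 * time.Millisecond,
		15 * time.Millisecond,
	} {
		s.Add(d)
	}

	// Print lazily builds the histogram and picks the largest unit that can
	// still represent the smallest recorded duration.
	s.Print(os.Stdout)
}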

vendor/google.golang.org/grpc/benchmark/stats/util.go generated vendored Normal file

@ -0,0 +1,191 @@
package stats
import (
"bufio"
"bytes"
"fmt"
"os"
"runtime"
"sort"
"strings"
"sync"
"testing"
)
var (
curB *testing.B
curBenchName string
curStats map[string]*Stats
orgStdout *os.File
nextOutPos int
injectCond *sync.Cond
injectDone chan struct{}
)
// AddStats adds a new unnamed Stats instance to the current benchmark. You need
// to run benchmarks by calling RunTestMain() to inject the stats to the
// benchmark results. If numBuckets is not positive, the default value (16) will
// be used. Please note that this calls b.ResetTimer() since it may be blocked
// until the previous benchmark stats is printed out. So AddStats() should
// typically be called at the very beginning of each benchmark function.
func AddStats(b *testing.B, numBuckets int) *Stats {
return AddStatsWithName(b, "", numBuckets)
}
// AddStatsWithName adds a new named Stats instance to the current benchmark.
// With this, you can add multiple stats in a single benchmark. You need
// to run benchmarks by calling RunTestMain() to inject the stats to the
// benchmark results. If numBuckets is not positive, the default value (16) will
// be used. Please note that this calls b.ResetTimer() since it may be blocked
// until the previous benchmark stats is printed out. So AddStatsWithName()
// should typically be called at the very beginning of each benchmark function.
func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats {
var benchName string
for i := 1; ; i++ {
pc, _, _, ok := runtime.Caller(i)
if !ok {
panic("benchmark function not found")
}
p := strings.Split(runtime.FuncForPC(pc).Name(), ".")
benchName = p[len(p)-1]
if strings.HasPrefix(benchName, "Benchmark") {
break
}
}
procs := runtime.GOMAXPROCS(-1)
if procs != 1 {
benchName = fmt.Sprintf("%s-%d", benchName, procs)
}
stats := NewStats(numBuckets)
if injectCond != nil {
// We need to wait until the previous benchmark stats is printed out.
injectCond.L.Lock()
for curB != nil && curBenchName != benchName {
injectCond.Wait()
}
curB = b
curBenchName = benchName
curStats[name] = stats
injectCond.L.Unlock()
}
b.ResetTimer()
return stats
}
// RunTestMain runs the tests with enabling injection of benchmark stats. It
// returns an exit code to pass to os.Exit.
func RunTestMain(m *testing.M) int {
startStatsInjector()
defer stopStatsInjector()
return m.Run()
}
// startStatsInjector starts stats injection to benchmark results.
func startStatsInjector() {
orgStdout = os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
nextOutPos = 0
resetCurBenchStats()
injectCond = sync.NewCond(&sync.Mutex{})
injectDone = make(chan struct{})
go func() {
defer close(injectDone)
scanner := bufio.NewScanner(r)
scanner.Split(splitLines)
for scanner.Scan() {
injectStatsIfFinished(scanner.Text())
}
if err := scanner.Err(); err != nil {
panic(err)
}
}()
}
// stopStatsInjector stops stats injection and restores os.Stdout.
func stopStatsInjector() {
os.Stdout.Close()
<-injectDone
injectCond = nil
os.Stdout = orgStdout
}
// splitLines is a split function for a bufio.Scanner that returns each line
// of text, teeing texts to the original stdout even before each line ends.
func splitLines(data []byte, eof bool) (advance int, token []byte, err error) {
if eof && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
orgStdout.Write(data[nextOutPos : i+1])
nextOutPos = 0
return i + 1, data[0:i], nil
}
orgStdout.Write(data[nextOutPos:])
nextOutPos = len(data)
if eof {
// This is a final, non-terminated line. Return it.
return len(data), data, nil
}
return 0, nil, nil
}
// injectStatsIfFinished prints out the stats if the current benchmark finishes.
func injectStatsIfFinished(line string) {
injectCond.L.Lock()
defer injectCond.L.Unlock()
// We assume that the benchmark results start with the benchmark name.
if curB == nil || !strings.HasPrefix(line, curBenchName) {
return
}
if !curB.Failed() {
// Output all stats in alphabetical order.
names := make([]string, 0, len(curStats))
for name := range curStats {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
stats := curStats[name]
// The output of stats starts with a header like "Histogram (unit: ms)"
// followed by statistical properties and the buckets. Add the stats name
// if it is a named stats and indent them as Go testing outputs.
lines := strings.Split(stats.String(), "\n")
if n := len(lines); n > 0 {
if name != "" {
name = ": " + name
}
fmt.Fprintf(orgStdout, "--- %s%s\n", lines[0], name)
for _, line := range lines[1 : n-1] {
fmt.Fprintf(orgStdout, "\t%s\n", line)
}
}
}
}
resetCurBenchStats()
injectCond.Signal()
}
// resetCurBenchStats resets the current benchmark stats.
func resetCurBenchStats() {
curB = nil
curBenchName = ""
curStats = make(map[string]*Stats)
}
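
A sketch of the intended usage described in the comments above, assuming a hypothetical example_test package and benchmark name: TestMain wraps RunTestMain, and each benchmark calls AddStats at the top so the recorded stats are injected into the "go test -bench" output.

package example_test

import (
	"os"
	"testing"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func TestMain(m *testing.M) {
	// RunTestMain enables injection of the recorded stats into the regular
	// benchmark output.
	os.Exit(stats.RunTestMain(m))
}

func BenchmarkExample(b *testing.B) {
	// AddStats resets the timer, so call it at the top of the benchmark.
	s := stats.AddStats(b, 16)
	for i := 0; i < b.N; i++ {
		start := time.Now()
		time.Sleep(10 * time.Microsecond) // stand-in for the operation under test
		s.Add(time.Since(start))
	}
}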


@ -0,0 +1,399 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package main
import (
"math"
"runtime"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/benchmark"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
"google.golang.org/grpc/benchmark/stats"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
)
var (
caFile = "benchmark/server/testdata/ca.pem"
)
type lockingHistogram struct {
mu sync.Mutex
histogram *stats.Histogram
}
func (h *lockingHistogram) add(value int64) {
h.mu.Lock()
defer h.mu.Unlock()
h.histogram.Add(value)
}
// swap sets h.histogram to new, and returns its old value.
func (h *lockingHistogram) swap(new *stats.Histogram) *stats.Histogram {
h.mu.Lock()
defer h.mu.Unlock()
old := h.histogram
h.histogram = new
return old
}
func (h *lockingHistogram) mergeInto(merged *stats.Histogram) {
h.mu.Lock()
defer h.mu.Unlock()
merged.Merge(h.histogram)
}
type benchmarkClient struct {
closeConns func()
stop chan bool
lastResetTime time.Time
histogramOptions stats.HistogramOptions
lockingHistograms []lockingHistogram
}
func printClientConfig(config *testpb.ClientConfig) {
// Some config options are ignored:
// - client type:
// will always create sync client
// - async client threads.
// - core list
grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType)
grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads)
// TODO: use cores specified by CoreList when setting list of cores is supported in go.
grpclog.Printf(" * core list: %v (ignored)", config.CoreList)
grpclog.Printf(" - security params: %v", config.SecurityParams)
grpclog.Printf(" - core limit: %v", config.CoreLimit)
grpclog.Printf(" - payload config: %v", config.PayloadConfig)
grpclog.Printf(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel)
grpclog.Printf(" - channel number: %v", config.ClientChannels)
grpclog.Printf(" - load params: %v", config.LoadParams)
grpclog.Printf(" - rpc type: %v", config.RpcType)
grpclog.Printf(" - histogram params: %v", config.HistogramParams)
grpclog.Printf(" - server targets: %v", config.ServerTargets)
}
func setupClientEnv(config *testpb.ClientConfig) {
// Use all cpu cores available on machine by default.
// TODO: Revisit this for the optimal default setup.
if config.CoreLimit > 0 {
runtime.GOMAXPROCS(int(config.CoreLimit))
} else {
runtime.GOMAXPROCS(runtime.NumCPU())
}
}
// createConns creates connections according to given config.
// It returns the connections and corresponding function to close them.
// It returns non-nil error if there is anything wrong.
func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) {
var opts []grpc.DialOption
// Sanity check for client type.
switch config.ClientType {
case testpb.ClientType_SYNC_CLIENT:
case testpb.ClientType_ASYNC_CLIENT:
default:
return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType)
}
// Check and set security options.
if config.SecurityParams != nil {
creds, err := credentials.NewClientTLSFromFile(abs(caFile), config.SecurityParams.ServerHostOverride)
if err != nil {
return nil, nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err)
}
opts = append(opts, grpc.WithTransportCredentials(creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
// Use byteBufCodec if it is required.
if config.PayloadConfig != nil {
switch config.PayloadConfig.Payload.(type) {
case *testpb.PayloadConfig_BytebufParams:
opts = append(opts, grpc.WithCodec(byteBufCodec{}))
case *testpb.PayloadConfig_SimpleParams:
default:
return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
}
}
// Create connections.
connCount := int(config.ClientChannels)
conns := make([]*grpc.ClientConn, connCount, connCount)
for connIndex := 0; connIndex < connCount; connIndex++ {
conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...)
}
return conns, func() {
for _, conn := range conns {
conn.Close()
}
}, nil
}
func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error {
// Read payload size and type from config.
var (
payloadReqSize, payloadRespSize int
payloadType string
)
if config.PayloadConfig != nil {
switch c := config.PayloadConfig.Payload.(type) {
case *testpb.PayloadConfig_BytebufParams:
payloadReqSize = int(c.BytebufParams.ReqSize)
payloadRespSize = int(c.BytebufParams.RespSize)
payloadType = "bytebuf"
case *testpb.PayloadConfig_SimpleParams:
payloadReqSize = int(c.SimpleParams.ReqSize)
payloadRespSize = int(c.SimpleParams.RespSize)
payloadType = "protobuf"
default:
return grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
}
}
// TODO add open loop distribution.
switch config.LoadParams.Load.(type) {
case *testpb.LoadParams_ClosedLoop:
case *testpb.LoadParams_Poisson:
return grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams)
default:
return grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams)
}
rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
switch config.RpcType {
case testpb.RpcType_UNARY:
bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize)
// TODO open loop.
case testpb.RpcType_STREAMING:
bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType)
// TODO open loop.
default:
return grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType)
}
return nil
}
func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) {
printClientConfig(config)
// Set running environment like how many cores to use.
setupClientEnv(config)
conns, closeConns, err := createConns(config)
if err != nil {
return nil, err
}
rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
bc := &benchmarkClient{
histogramOptions: stats.HistogramOptions{
NumBuckets: int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1,
GrowthFactor: config.HistogramParams.Resolution,
BaseBucketSize: (1 + config.HistogramParams.Resolution),
MinValue: 0,
},
lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns), rpcCountPerConn*len(conns)),
stop: make(chan bool),
lastResetTime: time.Now(),
closeConns: closeConns,
}
if err = performRPCs(config, conns, bc); err != nil {
// Close all connections if performRPCs failed.
closeConns()
return nil, err
}
return bc, nil
}
func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) {
for ic, conn := range conns {
client := testpb.NewBenchmarkServiceClient(conn)
// For each connection, create rpcCountPerConn goroutines to do rpc.
for j := 0; j < rpcCountPerConn; j++ {
// Create histogram for each goroutine.
idx := ic*rpcCountPerConn + j
bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
// Start goroutine on the created mutex and histogram.
go func(idx int) {
// TODO: do warm up if necessary.
// Now relying on worker client to reserve time to do warm up.
// The worker client needs to wait for some time after client is created,
// before starting benchmark.
done := make(chan bool)
for {
go func() {
start := time.Now()
if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
select {
case <-bc.stop:
case done <- false:
}
return
}
elapse := time.Since(start)
bc.lockingHistograms[idx].add(int64(elapse))
select {
case <-bc.stop:
case done <- true:
}
}()
select {
case <-bc.stop:
return
case <-done:
}
}
}(idx)
}
}
}
func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) {
var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error
if payloadType == "bytebuf" {
doRPC = benchmark.DoByteBufStreamingRoundTrip
} else {
doRPC = benchmark.DoStreamingRoundTrip
}
for ic, conn := range conns {
// For each connection, create rpcCountPerConn goroutines to do rpc.
for j := 0; j < rpcCountPerConn; j++ {
c := testpb.NewBenchmarkServiceClient(conn)
stream, err := c.StreamingCall(context.Background())
if err != nil {
grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
}
// Create histogram for each goroutine.
idx := ic*rpcCountPerConn + j
bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
// Start goroutine on the created mutex and histogram.
go func(idx int) {
// TODO: do warm up if necessary.
// Now relying on worker client to reserve time to do warm up.
// The worker client needs to wait for some time after client is created,
// before starting benchmark.
done := make(chan bool)
for {
go func() {
start := time.Now()
if err := doRPC(stream, reqSize, respSize); err != nil {
select {
case <-bc.stop:
case done <- false:
}
return
}
elapse := time.Since(start)
bc.lockingHistograms[idx].add(int64(elapse))
select {
case <-bc.stop:
case done <- true:
}
}()
select {
case <-bc.stop:
return
case <-done:
}
}
}(idx)
}
}
}
// getStats returns the stats for benchmark client.
// It resets lastResetTime and all histograms if argument reset is true.
func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
var timeElapsed float64
mergedHistogram := stats.NewHistogram(bc.histogramOptions)
if reset {
// Merging histogram may take some time.
// Put all histograms aside and merge later.
toMerge := make([]*stats.Histogram, len(bc.lockingHistograms), len(bc.lockingHistograms))
for i := range bc.lockingHistograms {
toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions))
}
for i := 0; i < len(toMerge); i++ {
mergedHistogram.Merge(toMerge[i])
}
timeElapsed = time.Since(bc.lastResetTime).Seconds()
bc.lastResetTime = time.Now()
} else {
// Merge only, not reset.
for i := range bc.lockingHistograms {
bc.lockingHistograms[i].mergeInto(mergedHistogram)
}
timeElapsed = time.Since(bc.lastResetTime).Seconds()
}
b := make([]uint32, len(mergedHistogram.Buckets), len(mergedHistogram.Buckets))
for i, v := range mergedHistogram.Buckets {
b[i] = uint32(v.Count)
}
return &testpb.ClientStats{
Latencies: &testpb.HistogramData{
Bucket: b,
MinSeen: float64(mergedHistogram.Min),
MaxSeen: float64(mergedHistogram.Max),
Sum: float64(mergedHistogram.Sum),
SumOfSquares: float64(mergedHistogram.SumOfSquares),
Count: float64(mergedHistogram.Count),
},
TimeElapsed: timeElapsed,
TimeUser: 0,
TimeSystem: 0,
}
}
func (bc *benchmarkClient) shutdown() {
close(bc.stop)
bc.closeConns()
}
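
As a worked example of the histogram sizing in startBenchmarkClient above, the snippet below recomputes NumBuckets for illustrative HistogramParams values; these particular numbers are not taken from this change.

package main

import (
	"fmt"
	"math"
)

func main() {
	// Illustrative HistogramParams; the driver supplies the real values.
	resolution := 0.01  // each bucket is 1% wider than the previous one
	maxPossible := 60e9 // largest latency we expect, in nanoseconds

	// Same sizing rule as startBenchmarkClient above: enough geometrically
	// growing buckets to cover [0, maxPossible].
	numBuckets := int(math.Log(maxPossible)/math.Log(1+resolution)) + 1
	fmt.Println("NumBuckets:", numBuckets) // ~2495 for these values
}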


@ -0,0 +1,173 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package main
import (
"runtime"
"strconv"
"strings"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/benchmark"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
)
var (
// File path related to google.golang.org/grpc.
certFile = "benchmark/server/testdata/server1.pem"
keyFile = "benchmark/server/testdata/server1.key"
)
type benchmarkServer struct {
port int
cores int
closeFunc func()
mu sync.RWMutex
lastResetTime time.Time
}
func printServerConfig(config *testpb.ServerConfig) {
// Some config options are ignored:
// - server type:
// will always start sync server
// - async server threads
// - core list
grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType)
grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads)
// TODO: use cores specified by CoreList when setting list of cores is supported in go.
grpclog.Printf(" * core list: %v (ignored)", config.CoreList)
grpclog.Printf(" - security params: %v", config.SecurityParams)
grpclog.Printf(" - core limit: %v", config.CoreLimit)
grpclog.Printf(" - port: %v", config.Port)
grpclog.Printf(" - payload config: %v", config.PayloadConfig)
}
func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) {
printServerConfig(config)
// Use all cpu cores available on machine by default.
// TODO: Revisit this for the optimal default setup.
numOfCores := runtime.NumCPU()
if config.CoreLimit > 0 {
numOfCores = int(config.CoreLimit)
}
runtime.GOMAXPROCS(numOfCores)
var opts []grpc.ServerOption
// Sanity check for server type.
switch config.ServerType {
case testpb.ServerType_SYNC_SERVER:
case testpb.ServerType_ASYNC_SERVER:
case testpb.ServerType_ASYNC_GENERIC_SERVER:
default:
return nil, grpc.Errorf(codes.InvalidArgument, "unknown server type: %v", config.ServerType)
}
// Set security options.
if config.SecurityParams != nil {
creds, err := credentials.NewServerTLSFromFile(abs(certFile), abs(keyFile))
if err != nil {
grpclog.Fatalf("failed to generate credentials %v", err)
}
opts = append(opts, grpc.Creds(creds))
}
// Priority: config.Port > serverPort > default (0).
port := int(config.Port)
if port == 0 {
port = serverPort
}
// Create different benchmark server according to config.
var (
addr string
closeFunc func()
err error
)
if config.PayloadConfig != nil {
switch payload := config.PayloadConfig.Payload.(type) {
case *testpb.PayloadConfig_BytebufParams:
opts = append(opts, grpc.CustomCodec(byteBufCodec{}))
addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Addr: ":" + strconv.Itoa(port),
Type: "bytebuf",
Metadata: payload.BytebufParams.RespSize,
}, opts...)
case *testpb.PayloadConfig_SimpleParams:
addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Addr: ":" + strconv.Itoa(port),
Type: "protobuf",
}, opts...)
case *testpb.PayloadConfig_ComplexParams:
return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig)
default:
return nil, grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
}
} else {
// Start protobuf server if payload config is nil.
addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Addr: ":" + strconv.Itoa(port),
Type: "protobuf",
}, opts...)
}
grpclog.Printf("benchmark server listening at %v", addr)
addrSplitted := strings.Split(addr, ":")
p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1])
if err != nil {
grpclog.Fatalf("failed to get port number from server address: %v", err)
}
return &benchmarkServer{port: p, cores: numOfCores, closeFunc: closeFunc, lastResetTime: time.Now()}, nil
}
// getStats returns the stats for benchmark server.
// It resets lastResetTime if argument reset is true.
func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats {
// TODO wall time, sys time, user time.
bs.mu.RLock()
defer bs.mu.RUnlock()
timeElapsed := time.Since(bs.lastResetTime).Seconds()
if reset {
bs.lastResetTime = time.Now()
}
return &testpb.ServerStats{TimeElapsed: timeElapsed, TimeUser: 0, TimeSystem: 0}
}

vendor/google.golang.org/grpc/benchmark/worker/main.go generated vendored Normal file

@ -0,0 +1,231 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package main
import (
"flag"
"fmt"
"io"
"net"
"runtime"
"strconv"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
)
var (
driverPort = flag.Int("driver_port", 10000, "port for communication with driver")
serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message")
)
type byteBufCodec struct {
}
func (byteBufCodec) Marshal(v interface{}) ([]byte, error) {
b, ok := v.(*[]byte)
if !ok {
return nil, fmt.Errorf("failed to marshal: %v is not of type *[]byte", v)
}
return *b, nil
}
func (byteBufCodec) Unmarshal(data []byte, v interface{}) error {
b, ok := v.(*[]byte)
if !ok {
return fmt.Errorf("failed to unmarshal: %v is not of type *[]byte", v)
}
*b = data
return nil
}
func (byteBufCodec) String() string {
return "bytebuffer"
}
// workerServer implements WorkerService rpc handlers.
// It can create benchmarkServer or benchmarkClient on demand.
type workerServer struct {
stop chan<- bool
serverPort int
}
func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error {
var bs *benchmarkServer
defer func() {
// Close benchmark server when stream ends.
grpclog.Printf("closing benchmark server")
if bs != nil {
bs.closeFunc()
}
}()
for {
in, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
var out *testpb.ServerStatus
switch argtype := in.Argtype.(type) {
case *testpb.ServerArgs_Setup:
grpclog.Printf("server setup received:")
if bs != nil {
grpclog.Printf("server setup received when server already exists, closing the existing server")
bs.closeFunc()
}
bs, err = startBenchmarkServer(argtype.Setup, s.serverPort)
if err != nil {
return err
}
out = &testpb.ServerStatus{
Stats: bs.getStats(false),
Port: int32(bs.port),
Cores: int32(bs.cores),
}
case *testpb.ServerArgs_Mark:
grpclog.Printf("server mark received:")
grpclog.Printf(" - %v", argtype)
if bs == nil {
return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received")
}
out = &testpb.ServerStatus{
Stats: bs.getStats(argtype.Mark.Reset_),
Port: int32(bs.port),
Cores: int32(bs.cores),
}
}
if err := stream.Send(out); err != nil {
return err
}
}
}
func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error {
var bc *benchmarkClient
defer func() {
// Shut down benchmark client when stream ends.
grpclog.Printf("shuting down benchmark client")
if bc != nil {
bc.shutdown()
}
}()
for {
in, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
var out *testpb.ClientStatus
switch t := in.Argtype.(type) {
case *testpb.ClientArgs_Setup:
grpclog.Printf("client setup received:")
if bc != nil {
grpclog.Printf("client setup received when client already exists, shuting down the existing client")
bc.shutdown()
}
bc, err = startBenchmarkClient(t.Setup)
if err != nil {
return err
}
out = &testpb.ClientStatus{
Stats: bc.getStats(false),
}
case *testpb.ClientArgs_Mark:
grpclog.Printf("client mark received:")
grpclog.Printf(" - %v", t)
if bc == nil {
return grpc.Errorf(codes.InvalidArgument, "client does not exist when mark received")
}
out = &testpb.ClientStatus{
Stats: bc.getStats(t.Mark.Reset_),
}
}
if err := stream.Send(out); err != nil {
return err
}
}
}
func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) {
grpclog.Printf("core count: %v", runtime.NumCPU())
return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil
}
func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) {
grpclog.Printf("quiting worker")
s.stop <- true
return &testpb.Void{}, nil
}
func main() {
grpc.EnableTracing = false
flag.Parse()
lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort))
if err != nil {
grpclog.Fatalf("failed to listen: %v", err)
}
grpclog.Printf("worker listening at port %v", *driverPort)
s := grpc.NewServer()
stop := make(chan bool)
testpb.RegisterWorkerServiceServer(s, &workerServer{
stop: stop,
serverPort: *serverPort,
})
go func() {
<-stop
// Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client.
// TODO revise this once server graceful stop is supported in gRPC.
time.Sleep(time.Second)
s.Stop()
}()
s.Serve(lis)
}

vendor/google.golang.org/grpc/benchmark/worker/util.go generated vendored Normal file

@ -0,0 +1,75 @@
/*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package main
import (
"log"
"os"
"path/filepath"
)
// abs returns the absolute path the given relative file or directory path,
// relative to the google.golang.org/grpc directory in the user's GOPATH.
// If rel is already absolute, it is returned unmodified.
func abs(rel string) string {
if filepath.IsAbs(rel) {
return rel
}
v, err := goPackagePath("google.golang.org/grpc")
if err != nil {
log.Fatalf("Error finding google.golang.org/grpc/testdata directory: %v", err)
}
return filepath.Join(v, rel)
}
func goPackagePath(pkg string) (path string, err error) {
gp := os.Getenv("GOPATH")
if gp == "" {
return path, os.ErrNotExist
}
for _, p := range filepath.SplitList(gp) {
dir := filepath.Join(p, "src", filepath.FromSlash(pkg))
fi, err := os.Stat(dir)
if os.IsNotExist(err) {
continue
}
if err != nil {
return "", err
}
if !fi.IsDir() {
continue
}
return dir, nil
}
return path, os.ErrNotExist
}

vendor/google.golang.org/grpc/call.go generated vendored Normal file

@ -0,0 +1,213 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"bytes"
"io"
"time"
"golang.org/x/net/context"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/transport"
)
// recvResponse receives and parses an RPC response.
// On error, it returns the error and indicates whether the call should be retried.
//
// TODO(zhaoq): Check whether the received message sequence is valid.
func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
// Try to acquire header metadata from the server if there is any.
var err error
c.headerMD, err = stream.Header()
if err != nil {
return err
}
p := &parser{r: stream}
for {
if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil {
if err == io.EOF {
break
}
return err
}
}
c.trailerMD = stream.Trailer()
return nil
}
// sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
stream, err := t.NewStream(ctx, callHdr)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
if _, ok := err.(transport.ConnectionError); !ok {
t.CloseStream(stream, err)
}
}
}()
var cbuf *bytes.Buffer
if compressor != nil {
cbuf = new(bytes.Buffer)
}
outBuf, err := encode(codec, args, compressor, cbuf)
if err != nil {
return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
}
err = t.Write(stream, outBuf, opts)
if err != nil {
return nil, err
}
// Sent successfully.
return stream, nil
}
// Invoke sends the RPC request on the wire and returns after the response is received.
// Invoke is called by generated code. Users can also call Invoke directly when
// their use case requires it.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
c := defaultCallInfo
for _, o := range opts {
if err := o.before(&c); err != nil {
return toRPCErr(err)
}
}
defer func() {
for _, o := range opts {
o.after(&c)
}
}()
if EnableTracing {
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
defer c.traceInfo.tr.Finish()
c.traceInfo.firstLine.client = true
if deadline, ok := ctx.Deadline(); ok {
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
}
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
defer func() {
if err != nil {
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
c.traceInfo.tr.SetError()
}
}()
}
topts := &transport.Options{
Last: true,
Delay: false,
}
for {
var (
err error
t transport.ClientTransport
stream *transport.Stream
// Record the put handler from Balancer.Get(...). It is called once the
// RPC has completed or failed.
put func()
)
// TODO(zhaoq): Need a formal spec of fail-fast.
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
}
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
}
t, put, err = cc.getTransport(ctx, gopts)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok {
return err
}
if err == errConnClosing {
if c.failFast {
return Errorf(codes.Unavailable, "%v", errConnClosing)
}
continue
}
// All the other errors are treated as Internal errors.
return Errorf(codes.Internal, "%v", err)
}
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
if err != nil {
if put != nil {
put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok {
if c.failFast {
return toRPCErr(err)
}
continue
}
return toRPCErr(err)
}
// Receive the response
err = recvResponse(cc.dopts, t, &c, stream, reply)
if err != nil {
if put != nil {
put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok {
if c.failFast {
return toRPCErr(err)
}
continue
}
t.CloseStream(stream, err)
return toRPCErr(err)
}
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
}
t.CloseStream(stream, nil)
if put != nil {
put()
put = nil
}
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
}
}

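Invoke is normally reached only through generated client stubs, but it can be driven by hand. The sketch below is not part of this change; the address, method name, and toy string codec are illustrative assumptions (mirroring the testCodec used in call_test.go further down), not the real API surface of the poke gRPC service.

```go
// Minimal sketch: calling a unary RPC through grpc.Invoke directly.
package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// stringCodec sends and receives raw strings; real clients use the default proto codec.
type stringCodec struct{}

func (stringCodec) Marshal(v interface{}) ([]byte, error)      { return []byte(*(v.(*string))), nil }
func (stringCodec) Unmarshal(data []byte, v interface{}) error { *(v.(*string)) = string(data); return nil }
func (stringCodec) String() string                             { return "string" }

func main() {
	conn, err := grpc.Dial("127.0.0.1:5557",
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithTimeout(5*time.Second),
		grpc.WithCodec(stringCodec{}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// "/foo/bar" is a placeholder full method name, as in the tests below.
	req, reply := "ping", ""
	if err := grpc.Invoke(context.Background(), "/foo/bar", &req, &reply, conn); err != nil {
		log.Fatalf("invoke: %v", err)
	}
	log.Printf("reply: %q", reply)
}
```
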
278
vendor/google.golang.org/grpc/call_test.go generated vendored Normal file
View file

@@ -0,0 +1,278 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"fmt"
"io"
"math"
"net"
"strconv"
"strings"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/transport"
)
var (
expectedRequest = "ping"
expectedResponse = "pong"
weirdError = "format verbs: %v%s"
sizeLargeErr = 1024 * 1024
canceled = 0
)
type testCodec struct {
}
func (testCodec) Marshal(v interface{}) ([]byte, error) {
return []byte(*(v.(*string))), nil
}
func (testCodec) Unmarshal(data []byte, v interface{}) error {
*(v.(*string)) = string(data)
return nil
}
func (testCodec) String() string {
return "test"
}
type testStreamHandler struct {
port string
t transport.ServerTransport
}
func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) {
p := &parser{r: s}
for {
pf, req, err := p.recvMsg()
if err == io.EOF {
break
}
if err != nil {
return
}
if pf != compressionNone {
t.Errorf("Received the mistaken message format %d, want %d", pf, compressionNone)
return
}
var v string
codec := testCodec{}
if err := codec.Unmarshal(req, &v); err != nil {
t.Errorf("Failed to unmarshal the received message: %v", err)
return
}
if v == "weird error" {
h.t.WriteStatus(s, codes.Internal, weirdError)
return
}
if v == "canceled" {
canceled++
h.t.WriteStatus(s, codes.Internal, "")
return
}
if v == "port" {
h.t.WriteStatus(s, codes.Internal, h.port)
return
}
if v != expectedRequest {
h.t.WriteStatus(s, codes.Internal, strings.Repeat("A", sizeLargeErr))
return
}
}
// send a response back to end the stream.
reply, err := encode(testCodec{}, &expectedResponse, nil, nil)
if err != nil {
t.Errorf("Failed to encode the response: %v", err)
return
}
h.t.Write(s, reply, &transport.Options{})
h.t.WriteStatus(s, codes.OK, "")
}
type server struct {
lis net.Listener
port string
startedErr chan error // sent nil or an error after server starts
mu sync.Mutex
conns map[transport.ServerTransport]bool
}
func newTestServer() *server {
return &server{startedErr: make(chan error, 1)}
}
// start starts server. Other goroutines should block on s.startedErr for further operations.
func (s *server) start(t *testing.T, port int, maxStreams uint32) {
var err error
if port == 0 {
s.lis, err = net.Listen("tcp", "localhost:0")
} else {
s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port))
}
if err != nil {
s.startedErr <- fmt.Errorf("failed to listen: %v", err)
return
}
_, p, err := net.SplitHostPort(s.lis.Addr().String())
if err != nil {
s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err)
return
}
s.port = p
s.conns = make(map[transport.ServerTransport]bool)
s.startedErr <- nil
for {
conn, err := s.lis.Accept()
if err != nil {
return
}
st, err := transport.NewServerTransport("http2", conn, maxStreams, nil)
if err != nil {
continue
}
s.mu.Lock()
if s.conns == nil {
s.mu.Unlock()
st.Close()
return
}
s.conns[st] = true
s.mu.Unlock()
h := &testStreamHandler{
port: s.port,
t: st,
}
go st.HandleStreams(func(s *transport.Stream) {
go h.handleStream(t, s)
})
}
}
func (s *server) wait(t *testing.T, timeout time.Duration) {
select {
case err := <-s.startedErr:
if err != nil {
t.Fatal(err)
}
case <-time.After(timeout):
t.Fatalf("Timed out after %v waiting for server to be ready", timeout)
}
}
func (s *server) stop() {
s.lis.Close()
s.mu.Lock()
for c := range s.conns {
c.Close()
}
s.conns = nil
s.mu.Unlock()
}
func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) {
server := newTestServer()
go server.start(t, port, maxStreams)
server.wait(t, 2*time.Second)
addr := "localhost:" + server.port
cc, err := Dial(addr, WithBlock(), WithInsecure(), WithCodec(testCodec{}))
if err != nil {
t.Fatalf("Failed to create ClientConn: %v", err)
}
return server, cc
}
func TestInvoke(t *testing.T) {
server, cc := setUp(t, 0, math.MaxUint32)
var reply string
if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse {
t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want <nil>", err)
}
cc.Close()
server.stop()
}
func TestInvokeLargeErr(t *testing.T) {
server, cc := setUp(t, 0, math.MaxUint32)
var reply string
req := "hello"
err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc)
if _, ok := err.(*rpcError); !ok {
t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.")
}
if Code(err) != codes.Internal || len(ErrorDesc(err)) != sizeLargeErr {
t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want an error of code %d and desc size %d", err, codes.Internal, sizeLargeErr)
}
cc.Close()
server.stop()
}
// TestInvokeErrorSpecialChars checks that error messages don't get mangled.
func TestInvokeErrorSpecialChars(t *testing.T) {
server, cc := setUp(t, 0, math.MaxUint32)
var reply string
req := "weird error"
err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc)
if _, ok := err.(*rpcError); !ok {
t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.")
}
if got, want := ErrorDesc(err), weirdError; got != want {
t.Fatalf("grpc.Invoke(_, _, _, _, _) error = %q, want %q", got, want)
}
cc.Close()
server.stop()
}
// TestInvokeCancel checks that an Invoke with a canceled context is not sent.
func TestInvokeCancel(t *testing.T) {
server, cc := setUp(t, 0, math.MaxUint32)
var reply string
req := "canceled"
for i := 0; i < 100; i++ {
ctx, cancel := context.WithCancel(context.Background())
cancel()
Invoke(ctx, "/foo/bar", &req, &reply, cc)
}
if canceled != 0 {
t.Fatalf("received %d of 100 canceled requests", canceled)
}
cc.Close()
server.stop()
}

724
vendor/google.golang.org/grpc/clientconn.go generated vendored Normal file
View file

@@ -0,0 +1,724 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"errors"
"fmt"
"net"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/transport"
)
var (
// ErrClientConnClosing indicates that the operation is illegal because
// the ClientConn is closing.
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errNoTransportSecurity indicates that there is no transport security
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
// errTransportCredentialsMissing indicates that users want to transmit security
// information (e.g., oauth2 token) which requires secure connection on an insecure
// connection.
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
// errCredentialsConflict indicates that grpc.WithTransportCredentials()
// and grpc.WithInsecure() are both called for a connection.
errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
// errNetworkIO indicates that the connection is down due to some network I/O error.
errNetworkIO = errors.New("grpc: failed with network I/O error")
// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
errNoAddr = errors.New("grpc: there is no address available to dial")
// minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second
)
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
codec Codec
cp Compressor
dc Decompressor
bs backoffStrategy
balancer Balancer
block bool
insecure bool
timeout time.Duration
copts transport.ConnectOptions
}
// DialOption configures how we set up the connection.
type DialOption func(*dialOptions)
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
func WithCodec(c Codec) DialOption {
return func(o *dialOptions) {
o.codec = c
}
}
// WithCompressor returns a DialOption which sets a Compressor to use for message
// compression.
func WithCompressor(cp Compressor) DialOption {
return func(o *dialOptions) {
o.cp = cp
}
}
// WithDecompressor returns a DialOption which sets a Decompressor to use for incoming
// message decompression.
func WithDecompressor(dc Decompressor) DialOption {
return func(o *dialOptions) {
o.dc = dc
}
}
// WithBalancer returns a DialOption which sets a load balancer.
func WithBalancer(b Balancer) DialOption {
return func(o *dialOptions) {
o.balancer = b
}
}
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
// when backing off after failed connection attempts.
func WithBackoffMaxDelay(md time.Duration) DialOption {
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
}
// WithBackoffConfig configures the dialer to use the provided backoff
// parameters after connection failures.
//
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
// for use.
func WithBackoffConfig(b BackoffConfig) DialOption {
// Set defaults to ensure that provided BackoffConfig is valid and
// unexported fields get default values.
setDefaults(&b)
return withBackoff(b)
}
// withBackoff sets the backoff strategy used for retries after a
// failed connection attempt.
//
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
func withBackoff(bs backoffStrategy) DialOption {
return func(o *dialOptions) {
o.bs = bs
}
}
// WithBlock returns a DialOption which makes the caller of Dial block until the underlying
// connection is up. Without this, Dial returns immediately and connecting to the server
// happens in the background.
func WithBlock() DialOption {
return func(o *dialOptions) {
o.block = true
}
}
// WithInsecure returns a DialOption which disables transport security for this ClientConn.
// Note that transport security is required unless WithInsecure is set.
func WithInsecure() DialOption {
return func(o *dialOptions) {
o.insecure = true
}
}
// WithTransportCredentials returns a DialOption which configures a
// connection level security credentials (e.g., TLS/SSL).
func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
return func(o *dialOptions) {
o.copts.TransportCredentials = creds
}
}
// WithPerRPCCredentials returns a DialOption which sets
// credentials which will place auth state on each outbound RPC.
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
return func(o *dialOptions) {
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
}
}
// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
// initially. This is valid if and only if WithBlock() is present.
func WithTimeout(d time.Duration) DialOption {
return func(o *dialOptions) {
o.timeout = d
}
}
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {
return func(o *dialOptions) {
o.copts.Dialer = f
}
}
// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
func WithUserAgent(s string) DialOption {
return func(o *dialOptions) {
o.copts.UserAgent = s
}
}
// Dial creates a client connection to the given target.
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
cc := &ClientConn{
target: target,
conns: make(map[Address]*addrConn),
}
for _, opt := range opts {
opt(&cc.dopts)
}
// Set defaults.
if cc.dopts.codec == nil {
cc.dopts.codec = protoCodec{}
}
if cc.dopts.bs == nil {
cc.dopts.bs = DefaultBackoffConfig
}
if cc.dopts.balancer == nil {
cc.dopts.balancer = RoundRobin(nil)
}
if err := cc.dopts.balancer.Start(target); err != nil {
return nil, err
}
var (
ok bool
addrs []Address
)
ch := cc.dopts.balancer.Notify()
if ch == nil {
// There is no name resolver installed.
addrs = append(addrs, Address{Addr: target})
} else {
addrs, ok = <-ch
if !ok || len(addrs) == 0 {
return nil, errNoAddr
}
}
waitC := make(chan error, 1)
go func() {
for _, a := range addrs {
if err := cc.newAddrConn(a, false); err != nil {
waitC <- err
return
}
}
close(waitC)
}()
var timeoutCh <-chan time.Time
if cc.dopts.timeout > 0 {
timeoutCh = time.After(cc.dopts.timeout)
}
select {
case err := <-waitC:
if err != nil {
cc.Close()
return nil, err
}
case <-timeoutCh:
cc.Close()
return nil, ErrClientConnTimeout
}
if ok {
go cc.lbWatcher()
}
colonPos := strings.LastIndex(target, ":")
if colonPos == -1 {
colonPos = len(target)
}
cc.authority = target[:colonPos]
return cc, nil
}
// ConnectivityState indicates the state of a client connection.
type ConnectivityState int
const (
// Idle indicates the ClientConn is idle.
Idle ConnectivityState = iota
// Connecting indicates the ClientConn is connecting.
Connecting
// Ready indicates the ClientConn is ready for work.
Ready
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
TransientFailure
// Shutdown indicates the ClientConn has started shutting down.
Shutdown
)
func (s ConnectivityState) String() string {
switch s {
case Idle:
return "IDLE"
case Connecting:
return "CONNECTING"
case Ready:
return "READY"
case TransientFailure:
return "TRANSIENT_FAILURE"
case Shutdown:
return "SHUTDOWN"
default:
panic(fmt.Sprintf("unknown connectivity state: %d", s))
}
}
// ClientConn represents a client connection to an RPC server.
type ClientConn struct {
target string
authority string
dopts dialOptions
mu sync.RWMutex
conns map[Address]*addrConn
}
func (cc *ClientConn) lbWatcher() {
for addrs := range cc.dopts.balancer.Notify() {
var (
add []Address // Addresses need to setup connections.
del []*addrConn // Connections need to tear down.
)
cc.mu.Lock()
for _, a := range addrs {
if _, ok := cc.conns[a]; !ok {
add = append(add, a)
}
}
for k, c := range cc.conns {
var keep bool
for _, a := range addrs {
if k == a {
keep = true
break
}
}
if !keep {
del = append(del, c)
}
}
cc.mu.Unlock()
for _, a := range add {
cc.newAddrConn(a, true)
}
for _, c := range del {
c.tearDown(errConnDrain)
}
}
}
func (cc *ClientConn) newAddrConn(addr Address, skipWait bool) error {
ac := &addrConn{
cc: cc,
addr: addr,
dopts: cc.dopts,
shutdownChan: make(chan struct{}),
}
if EnableTracing {
ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
}
if !ac.dopts.insecure {
if ac.dopts.copts.TransportCredentials == nil {
return errNoTransportSecurity
}
} else {
if ac.dopts.copts.TransportCredentials != nil {
return errCredentialsConflict
}
for _, cd := range ac.dopts.copts.PerRPCCredentials {
if cd.RequireTransportSecurity() {
return errTransportCredentialsMissing
}
}
}
// Insert ac into ac.cc.conns. This needs to be done before any getTransport(...) is called.
ac.cc.mu.Lock()
if ac.cc.conns == nil {
ac.cc.mu.Unlock()
return ErrClientConnClosing
}
stale := ac.cc.conns[ac.addr]
ac.cc.conns[ac.addr] = ac
ac.cc.mu.Unlock()
if stale != nil {
// There is an addrConn alive on ac.addr already. This could be due to
// i) stale's Close is undergoing;
// ii) a buggy Balancer notifies duplicated Addresses.
stale.tearDown(errConnDrain)
}
ac.stateCV = sync.NewCond(&ac.mu)
// skipWait may overwrite the decision in ac.dopts.block.
if ac.dopts.block && !skipWait {
if err := ac.resetTransport(false); err != nil {
ac.tearDown(err)
return err
}
// Start to monitor the error status of transport.
go ac.transportMonitor()
} else {
// Start a goroutine connecting to the server asynchronously.
go func() {
if err := ac.resetTransport(false); err != nil {
grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
ac.tearDown(err)
return
}
ac.transportMonitor()
}()
}
return nil
}
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
addr, put, err := cc.dopts.balancer.Get(ctx, opts)
if err != nil {
return nil, nil, toRPCErr(err)
}
cc.mu.RLock()
if cc.conns == nil {
cc.mu.RUnlock()
return nil, nil, toRPCErr(ErrClientConnClosing)
}
ac, ok := cc.conns[addr]
cc.mu.RUnlock()
if !ok {
if put != nil {
put()
}
return nil, nil, Errorf(codes.Internal, "grpc: failed to find the transport to send the rpc")
}
t, err := ac.wait(ctx, !opts.BlockingWait)
if err != nil {
if put != nil {
put()
}
return nil, nil, err
}
return t, put, nil
}
// Close tears down the ClientConn and all underlying connections.
func (cc *ClientConn) Close() error {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return ErrClientConnClosing
}
conns := cc.conns
cc.conns = nil
cc.mu.Unlock()
cc.dopts.balancer.Close()
for _, ac := range conns {
ac.tearDown(ErrClientConnClosing)
}
return nil
}
// addrConn is a network connection to a given address.
type addrConn struct {
cc *ClientConn
addr Address
dopts dialOptions
shutdownChan chan struct{}
events trace.EventLog
mu sync.Mutex
state ConnectivityState
stateCV *sync.Cond
down func(error) // the handler called when a connection is down.
// ready is closed and becomes nil when a new transport is up or failed
// due to timeout.
ready chan struct{}
transport transport.ClientTransport
}
// printf records an event in ac's event log, unless ac has been closed.
// REQUIRES ac.mu is held.
func (ac *addrConn) printf(format string, a ...interface{}) {
if ac.events != nil {
ac.events.Printf(format, a...)
}
}
// errorf records an error in ac's event log, unless ac has been closed.
// REQUIRES ac.mu is held.
func (ac *addrConn) errorf(format string, a ...interface{}) {
if ac.events != nil {
ac.events.Errorf(format, a...)
}
}
// getState returns the connectivity state of the Conn
func (ac *addrConn) getState() ConnectivityState {
ac.mu.Lock()
defer ac.mu.Unlock()
return ac.state
}
// waitForStateChange blocks until the state changes to something other than the sourceState.
func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
ac.mu.Lock()
defer ac.mu.Unlock()
if sourceState != ac.state {
return ac.state, nil
}
done := make(chan struct{})
var err error
go func() {
select {
case <-ctx.Done():
ac.mu.Lock()
err = ctx.Err()
ac.stateCV.Broadcast()
ac.mu.Unlock()
case <-done:
}
}()
defer close(done)
for sourceState == ac.state {
ac.stateCV.Wait()
if err != nil {
return ac.state, err
}
}
return ac.state, nil
}
func (ac *addrConn) resetTransport(closeTransport bool) error {
var retries int
for {
ac.mu.Lock()
ac.printf("connecting")
if ac.state == Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
return errConnClosing
}
if ac.down != nil {
ac.down(downErrorf(false, true, "%v", errNetworkIO))
ac.down = nil
}
ac.state = Connecting
ac.stateCV.Broadcast()
t := ac.transport
ac.mu.Unlock()
if closeTransport && t != nil {
t.Close()
}
sleepTime := ac.dopts.bs.backoff(retries)
ac.dopts.copts.Timeout = sleepTime
if sleepTime < minConnectTimeout {
ac.dopts.copts.Timeout = minConnectTimeout
}
connectTime := time.Now()
newTransport, err := transport.NewClientTransport(ac.addr.Addr, &ac.dopts.copts)
if err != nil {
ac.mu.Lock()
if ac.state == Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
return errConnClosing
}
ac.errorf("transient failure: %v", err)
ac.state = TransientFailure
ac.stateCV.Broadcast()
if ac.ready != nil {
close(ac.ready)
ac.ready = nil
}
ac.mu.Unlock()
sleepTime -= time.Since(connectTime)
if sleepTime < 0 {
sleepTime = 0
}
closeTransport = false
select {
case <-time.After(sleepTime):
case <-ac.shutdownChan:
}
retries++
grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr)
continue
}
ac.mu.Lock()
ac.printf("ready")
if ac.state == Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
newTransport.Close()
return errConnClosing
}
ac.state = Ready
ac.stateCV.Broadcast()
ac.transport = newTransport
if ac.ready != nil {
close(ac.ready)
ac.ready = nil
}
ac.down = ac.cc.dopts.balancer.Up(ac.addr)
ac.mu.Unlock()
return nil
}
}
// transportMonitor runs in a goroutine to track errors on the transport and creates a
// new transport if an error happens. It returns when the connection is closing.
func (ac *addrConn) transportMonitor() {
for {
ac.mu.Lock()
t := ac.transport
ac.mu.Unlock()
select {
// shutdownChan is needed to detect the teardown when
// the addrConn is idle (i.e., no RPC in flight).
case <-ac.shutdownChan:
return
case <-t.Error():
ac.mu.Lock()
if ac.state == Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
return
}
ac.state = TransientFailure
ac.stateCV.Broadcast()
ac.mu.Unlock()
if err := ac.resetTransport(true); err != nil {
ac.mu.Lock()
ac.printf("transport exiting: %v", err)
ac.mu.Unlock()
grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
return
}
}
}
}
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
// iv) transport is in TransientFailure and the RPC is fail-fast.
func (ac *addrConn) wait(ctx context.Context, failFast bool) (transport.ClientTransport, error) {
for {
ac.mu.Lock()
switch {
case ac.state == Shutdown:
ac.mu.Unlock()
return nil, errConnClosing
case ac.state == Ready:
ct := ac.transport
ac.mu.Unlock()
return ct, nil
case ac.state == TransientFailure && failFast:
ac.mu.Unlock()
return nil, Errorf(codes.Unavailable, "grpc: RPC failed fast due to transport failure")
default:
ready := ac.ready
if ready == nil {
ready = make(chan struct{})
ac.ready = ready
}
ac.mu.Unlock()
select {
case <-ctx.Done():
return nil, toRPCErr(ctx.Err())
// Wait until the new transport is ready or failed.
case <-ready:
}
}
}
}
// tearDown starts to tear down the addrConn.
// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
// some edge cases (e.g., the caller opens and closes many addrConn's in a
// tight loop).
func (ac *addrConn) tearDown(err error) {
ac.mu.Lock()
defer func() {
ac.mu.Unlock()
ac.cc.mu.Lock()
if ac.cc.conns != nil {
delete(ac.cc.conns, ac.addr)
}
ac.cc.mu.Unlock()
}()
if ac.state == Shutdown {
return
}
ac.state = Shutdown
if ac.down != nil {
ac.down(downErrorf(false, false, "%v", err))
ac.down = nil
}
ac.stateCV.Broadcast()
if ac.events != nil {
ac.events.Finish()
ac.events = nil
}
if ac.ready != nil {
close(ac.ready)
ac.ready = nil
}
if ac.transport != nil {
if err == errConnDrain {
ac.transport.GracefulClose()
} else {
ac.transport.Close()
}
}
if ac.shutdownChan != nil {
close(ac.shutdownChan)
}
return
}

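As a usage note (a sketch under the assumption of an insecure local endpoint, not code from this change), the dial options above compose like this:

```go
// Minimal sketch of composing DialOptions: block until connected, bound the
// initial dial by a timeout, and cap the reconnect backoff delay.
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:5557",
		grpc.WithInsecure(),                      // no transport security (local testing only)
		grpc.WithBlock(),                         // make Dial wait for the connection
		grpc.WithTimeout(3*time.Second),          // give up if it is not up in time
		grpc.WithBackoffMaxDelay(10*time.Second), // cap retry backoff after failures
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```
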
126
vendor/google.golang.org/grpc/clientconn_test.go generated vendored Normal file
View file

@@ -0,0 +1,126 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"testing"
"time"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/oauth"
)
const tlsDir = "testdata/"
func TestDialTimeout(t *testing.T) {
conn, err := Dial("Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure())
if err == nil {
conn.Close()
}
if err != ErrClientConnTimeout {
t.Fatalf("Dial(_, _) = %v, %v, want %v", conn, err, ErrClientConnTimeout)
}
}
func TestTLSDialTimeout(t *testing.T) {
creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com")
if err != nil {
t.Fatalf("Failed to create credentials %v", err)
}
conn, err := Dial("Non-Existent.Server:80", WithTransportCredentials(creds), WithTimeout(time.Millisecond), WithBlock())
if err == nil {
conn.Close()
}
if err != ErrClientConnTimeout {
t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, ErrClientConnTimeout)
}
}
func TestCredentialsMisuse(t *testing.T) {
tlsCreds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com")
if err != nil {
t.Fatalf("Failed to create authenticator %v", err)
}
// Two conflicting credential configurations
if _, err := Dial("Non-Existent.Server:80", WithTransportCredentials(tlsCreds), WithTimeout(time.Millisecond), WithBlock(), WithInsecure()); err != errCredentialsConflict {
t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errCredentialsConflict)
}
rpcCreds, err := oauth.NewJWTAccessFromKey(nil)
if err != nil {
t.Fatalf("Failed to create credentials %v", err)
}
// security info on insecure connection
if _, err := Dial("Non-Existent.Server:80", WithPerRPCCredentials(rpcCreds), WithTimeout(time.Millisecond), WithBlock(), WithInsecure()); err != errTransportCredentialsMissing {
t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredentialsMissing)
}
}
func TestWithBackoffConfigDefault(t *testing.T) {
testBackoffConfigSet(t, &DefaultBackoffConfig)
}
func TestWithBackoffConfig(t *testing.T) {
b := BackoffConfig{MaxDelay: DefaultBackoffConfig.MaxDelay / 2}
expected := b
setDefaults(&expected) // defaults should be set
testBackoffConfigSet(t, &expected, WithBackoffConfig(b))
}
func TestWithBackoffMaxDelay(t *testing.T) {
md := DefaultBackoffConfig.MaxDelay / 2
expected := BackoffConfig{MaxDelay: md}
setDefaults(&expected)
testBackoffConfigSet(t, &expected, WithBackoffMaxDelay(md))
}
func testBackoffConfigSet(t *testing.T, expected *BackoffConfig, opts ...DialOption) {
opts = append(opts, WithInsecure())
conn, err := Dial("foo:80", opts...)
if err != nil {
t.Fatalf("unexpected error dialing connection: %v", err)
}
if conn.dopts.bs == nil {
t.Fatalf("backoff config not set")
}
actual, ok := conn.dopts.bs.(BackoffConfig)
if !ok {
t.Fatalf("unexpected type of backoff config: %#v", conn.dopts.bs)
}
if actual != *expected {
t.Fatalf("unexpected backoff config on connection: %v, want %v", actual, expected)
}
}

17
vendor/google.golang.org/grpc/codegen.sh generated vendored Executable file
View file

@@ -0,0 +1,17 @@
#!/bin/bash
# This script serves as an example to demonstrate how to generate the gRPC-Go
# interface and the related messages from .proto file.
#
# It assumes the installation of i) Google proto buffer compiler at
# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
# not, please install them first.
#
# We recommend running this script at $GOPATH/src.
#
# If this is not what you need, feel free to make your own scripts. Again, this
# script is for demonstration purpose.
#
proto=$1
protoc --go_out=plugins=grpc:. $proto

16
vendor/google.golang.org/grpc/codes/code_string.go generated vendored Normal file
View file

@@ -0,0 +1,16 @@
// generated by stringer -type=Code; DO NOT EDIT
package codes
import "fmt"
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
func (i Code) String() string {
if i+1 >= Code(len(_Code_index)) {
return fmt.Sprintf("Code(%d)", i)
}
return _Code_name[_Code_index[i]:_Code_index[i+1]]
}

159
vendor/google.golang.org/grpc/codes/codes.go generated vendored Normal file
View file

@@ -0,0 +1,159 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package codes defines the canonical error codes used by gRPC. It is
// consistent across various languages.
package codes // import "google.golang.org/grpc/codes"
// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
type Code uint32
//go:generate stringer -type=Code
const (
// OK is returned on success.
OK Code = 0
// Canceled indicates the operation was cancelled (typically by the caller).
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
// if a Status value received from another address space belongs to
// an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
Unknown Code = 2
// InvalidArgument indicates client specified an invalid argument.
// Note that this differs from FailedPrecondition. It indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
InvalidArgument Code = 3
// DeadlineExceeded means operation expired before completion.
// For operations that change the state of the system, this error may be
// returned even if the operation has completed successfully. For
// example, a successful response from a server could have been delayed
// long enough for the deadline to expire.
DeadlineExceeded Code = 4
// NotFound means some requested entity (e.g., file or directory) was
// not found.
NotFound Code = 5
// AlreadyExists means an attempt to create an entity failed because one
// already exists.
AlreadyExists Code = 6
// PermissionDenied indicates the caller does not have permission to
// execute the specified operation. It must not be used for rejections
// caused by exhausting some resource (use ResourceExhausted
// instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
PermissionDenied Code = 7
// Unauthenticated indicates the request does not have valid
// authentication credentials for the operation.
Unauthenticated Code = 16
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
ResourceExhausted Code = 8
// FailedPrecondition indicates operation was rejected because the
// system is not in a state required for the operation's execution.
// For example, directory to be deleted may be non-empty, an rmdir
// operation is applied to a non-directory, etc.
//
// A litmus test that may help a service implementor in deciding
// between FailedPrecondition, Aborted, and Unavailable:
// (a) Use Unavailable if the client can retry just the failing call.
// (b) Use Aborted if the client should retry at a higher-level
// (e.g., restarting a read-modify-write sequence).
// (c) Use FailedPrecondition if the client should not retry until
// the system state has been explicitly fixed. E.g., if an "rmdir"
// fails because the directory is non-empty, FailedPrecondition
// should be returned since the client should not retry unless
// they have first fixed up the directory by deleting files from it.
// (d) Use FailedPrecondition if the client performs conditional
// REST Get/Update/Delete on a resource and the resource on the
// server does not match the condition. E.g., conflicting
// read-modify-write on the same resource.
FailedPrecondition Code = 9
// Aborted indicates the operation was aborted, typically due to a
// concurrency issue like sequencer check failures, transaction aborts,
// etc.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
Aborted Code = 10
// OutOfRange means operation was attempted past the valid range.
// E.g., seeking or reading past end of file.
//
// Unlike InvalidArgument, this error indicates a problem that may
// be fixed if the system state changes. For example, a 32-bit file
// system will generate InvalidArgument if asked to read at an
// offset that is not in the range [0,2^32-1], but it will generate
// OutOfRange if asked to read from an offset past the current
// file size.
//
// There is a fair bit of overlap between FailedPrecondition and
// OutOfRange. We recommend using OutOfRange (the more specific
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
OutOfRange Code = 11
// Unimplemented indicates operation is not implemented or not
// supported/enabled in this service.
Unimplemented Code = 12
// Internal errors. These mean some invariants expected by the underlying
// system have been broken. If you see one of these errors,
// something is very broken.
Internal Code = 13
// Unavailable indicates the service is currently unavailable.
// This is most likely a transient condition and may be corrected
// by retrying with a backoff.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
Unavailable Code = 14
// DataLoss indicates unrecoverable data loss or corruption.
DataLoss Code = 15
)

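For a concrete sense of how these codes surface to callers, here is a small sketch (the message text is illustrative only) using the Errorf, Code, and ErrorDesc helpers that appear in the files above:

```go
// Minimal sketch: wrapping an error with a canonical code and reading it back.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func main() {
	err := grpc.Errorf(codes.NotFound, "client %q not found", "example-app")
	// Code and ErrorDesc unpack the rpcError produced by Errorf.
	log.Printf("code=%v desc=%q", grpc.Code(err), grpc.ErrorDesc(err))
}
```
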
47
vendor/google.golang.org/grpc/coverage.sh generated vendored Executable file
View file

@@ -0,0 +1,47 @@
#!/bin/bash
set -e
workdir=.cover
profile="$workdir/cover.out"
mode=set
end2endtest="google.golang.org/grpc/test"
generate_cover_data() {
rm -rf "$workdir"
mkdir "$workdir"
for pkg in "$@"; do
if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
then
f="$workdir/$(echo $pkg | tr / -)"
go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
fi
done
echo "mode: $mode" >"$profile"
grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
}
show_cover_report() {
go tool cover -${1}="$profile"
}
push_to_coveralls() {
goveralls -coverprofile="$profile"
}
generate_cover_data $(go list ./...)
show_cover_report func
case "$1" in
"")
;;
--html)
show_cover_report html ;;
--coveralls)
push_to_coveralls ;;
*)
echo >&2 "error: invalid option: $1" ;;
esac
rm -rf "$workdir"

226
vendor/google.golang.org/grpc/credentials/credentials.go generated vendored Normal file
View file

@@ -0,0 +1,226 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package credentials implements various credentials supported by gRPC library,
// which encapsulate all the state needed by a client to authenticate with a
// server and make various assertions, e.g., about the client's identity, role,
// or whether it is authorized to make a particular call.
package credentials // import "google.golang.org/grpc/credentials"
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"strings"
"time"
"golang.org/x/net/context"
)
var (
// alpnProtoStr are the specified application level protocols for gRPC.
alpnProtoStr = []string{"h2"}
)
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface {
// GetRequestMetadata gets the current request metadata, refreshing
// tokens if required. This should be called by the transport layer on
// each request, and the data should be populated in headers or other
// context. uri is the URI of the entry point for the request. When
// supported by the underlying implementation, ctx can be used for
// timeout and cancellation.
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
// it as an arbitrary string.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials require
// transport security.
RequireTransportSecurity() bool
}
// ProtocolInfo provides information regarding the gRPC wire protocol version,
// security protocol, security protocol version in use, etc.
type ProtocolInfo struct {
// ProtocolVersion is the gRPC wire protocol version.
ProtocolVersion string
// SecurityProtocol is the security protocol in use.
SecurityProtocol string
// SecurityVersion is the security protocol version.
SecurityVersion string
}
// AuthInfo defines the common interface for the auth information the users are interested in.
type AuthInfo interface {
AuthType() string
}
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportCredentials interface {
// ClientHandshake does the authentication handshake specified by the corresponding
// authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection.
ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
// the connection.
ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
// Info provides the ProtocolInfo of this TransportCredentials.
Info() ProtocolInfo
}
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
State tls.ConnectionState
}
// AuthType returns the type of TLSInfo as a string.
func (t TLSInfo) AuthType() string {
return "tls"
}
// tlsCreds is the credentials required for authenticating a connection using TLS.
type tlsCreds struct {
// TLS configuration
config *tls.Config
}
func (c tlsCreds) Info() ProtocolInfo {
return ProtocolInfo{
SecurityProtocol: "tls",
SecurityVersion: "1.2",
}
}
// GetRequestMetadata returns nil, nil since TLS credentials do not have
// metadata.
func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
return nil, nil
}
func (c *tlsCreds) RequireTransportSecurity() bool {
return true
}
type timeoutError struct{}
func (timeoutError) Error() string { return "credentials: Dial timed out" }
func (timeoutError) Timeout() bool { return true }
func (timeoutError) Temporary() bool { return true }
func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
// borrow some code from tls.DialWithDialer
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- timeoutError{}
})
}
if c.config.ServerName == "" {
colonPos := strings.LastIndex(addr, ":")
if colonPos == -1 {
colonPos = len(addr)
}
c.config.ServerName = addr[:colonPos]
}
conn := tls.Client(rawConn, c.config)
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, nil, err
}
// TODO(zhaoq): Omit the auth info for client now. It is more for
// information than anything else.
return conn, nil, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
conn := tls.Server(rawConn, c.config)
if err := conn.Handshake(); err != nil {
rawConn.Close()
return nil, nil, err
}
return conn, TLSInfo{conn.ConnectionState()}, nil
}
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{c}
tc.config.NextProtos = alpnProtoStr
return tc
}
// NewClientTLSFromCert constructs TLS transport credentials from the input certificate for a client.
func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
}
// NewClientTLSFromFile constructs TLS transport credentials from the input certificate file for a client.
func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, error) {
b, err := ioutil.ReadFile(certFile)
if err != nil {
return nil, err
}
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(b) {
return nil, fmt.Errorf("credentials: failed to append certificates")
}
return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
}
// NewServerTLSFromCert constructs TLS transport credentials from the input certificate for a server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
}
// NewServerTLSFromFile constructs TLS transport credentials from the input certificate
// file and key file for a server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, err
}
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
}

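A short client-side sketch of the TLS helpers above (the certificate path and server name are placeholders, not from this change):

```go
// Minimal sketch: dialing with TLS transport credentials loaded from a CA file.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	creds, err := credentials.NewClientTLSFromFile("testdata/ca.pem", "server.example.com")
	if err != nil {
		log.Fatalf("load credentials: %v", err)
	}
	conn, err := grpc.Dial("server.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```
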
180
vendor/google.golang.org/grpc/credentials/oauth/oauth.go generated vendored Normal file
View file

@@ -0,0 +1,180 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package oauth implements gRPC credentials using OAuth.
package oauth
import (
"fmt"
"io/ioutil"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"google.golang.org/grpc/credentials"
)
// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource.
type TokenSource struct {
oauth2.TokenSource
}
// GetRequestMetadata gets the request metadata as a map from a TokenSource.
func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
token, err := ts.Token()
if err != nil {
return nil, err
}
return map[string]string{
"authorization": token.Type() + " " + token.AccessToken,
}, nil
}
// RequireTransportSecurity indicates whether the credentials require transport security.
func (ts TokenSource) RequireTransportSecurity() bool {
return true
}
type jwtAccess struct {
jsonKey []byte
}
// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile.
func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) {
jsonKey, err := ioutil.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
}
return NewJWTAccessFromKey(jsonKey)
}
// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey.
func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) {
return jwtAccess{jsonKey}, nil
}
func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0])
if err != nil {
return nil, err
}
token, err := ts.Token()
if err != nil {
return nil, err
}
return map[string]string{
"authorization": token.TokenType + " " + token.AccessToken,
}, nil
}
func (j jwtAccess) RequireTransportSecurity() bool {
return true
}
// oauthAccess supplies PerRPCCredentials from a given token.
type oauthAccess struct {
token oauth2.Token
}
// NewOauthAccess constructs the PerRPCCredentials using a given token.
func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials {
return oauthAccess{token: *token}
}
func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
return map[string]string{
"authorization": oa.token.TokenType + " " + oa.token.AccessToken,
}, nil
}
func (oa oauthAccess) RequireTransportSecurity() bool {
return true
}
// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from
// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
// if your program is running on a GCE instance.
// TODO(dsymonds): Deprecate and remove this.
func NewComputeEngine() credentials.PerRPCCredentials {
return TokenSource{google.ComputeTokenSource("")}
}
// serviceAccount represents PerRPCCredentials via JWT signing key.
type serviceAccount struct {
config *jwt.Config
}
func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
token, err := s.config.TokenSource(ctx).Token()
if err != nil {
return nil, err
}
return map[string]string{
"authorization": token.TokenType + " " + token.AccessToken,
}, nil
}
func (s serviceAccount) RequireTransportSecurity() bool {
return true
}
// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice
// from a Google Developers service account.
func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) {
config, err := google.JWTConfigFromJSON(jsonKey, scope...)
if err != nil {
return nil, err
}
return serviceAccount{config: config}, nil
}
// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file
// of a Google Developers service account.
func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) {
jsonKey, err := ioutil.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
}
return NewServiceAccountFromKey(jsonKey, scope...)
}
// NewApplicationDefault returns "Application Default Credentials". For more
// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) {
t, err := google.DefaultTokenSource(ctx, scope...)
if err != nil {
return nil, err
}
return TokenSource{t}, nil
}

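Per-RPC credentials like these must be paired with transport security (see errTransportCredentialsMissing above). A hedged sketch, with a placeholder static token and certificate path that are not part of this change:

```go
// Minimal sketch: attaching a bearer token to every RPC on a TLS connection.
package main

import (
	"log"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// A static token is used purely for illustration; TokenSource-based
	// credentials refresh tokens automatically.
	perRPC := oauth.NewOauthAccess(&oauth2.Token{AccessToken: "fake-token", TokenType: "Bearer"})
	creds, err := credentials.NewClientTLSFromFile("testdata/ca.pem", "server.example.com")
	if err != nil {
		log.Fatalf("load credentials: %v", err)
	}
	conn, err := grpc.Dial("server.example.com:443",
		grpc.WithTransportCredentials(creds),
		grpc.WithPerRPCCredentials(perRPC),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```
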
6
vendor/google.golang.org/grpc/doc.go generated vendored Normal file
View file

@@ -0,0 +1,6 @@
/*
Package grpc implements an RPC system called gRPC.
See www.grpc.io for more information about gRPC.
*/
package grpc // import "google.golang.org/grpc"

57
vendor/google.golang.org/grpc/examples/README.md generated vendored Normal file
View file

@@ -0,0 +1,57 @@
gRPC in 3 minutes (Go)
======================
BACKGROUND
-------------
For this sample, we've already generated the server and client stubs from [helloworld.proto](helloworld/helloworld/helloworld.proto).
PREREQUISITES
-------------
- This requires Go 1.4
- Requires that [GOPATH is set](https://golang.org/doc/code.html#GOPATH)
```
$ go help gopath
$ # ensure the PATH contains $GOPATH/bin
$ export PATH=$PATH:$GOPATH/bin
```
INSTALL
-------
```
$ go get -u google.golang.org/grpc/examples/helloworld/greeter_client
$ go get -u google.golang.org/grpc/examples/helloworld/greeter_server
```
TRY IT!
-------
- Run the server
```
$ greeter_server &
```
- Run the client
```
$ greeter_client
```
OPTIONAL - Rebuilding the generated code
----------------------------------------
1. First [install protoc](https://github.com/google/protobuf/blob/master/README.md)
- For now, this needs to be installed from source
- This will change once proto3 is officially released
2. Install the protoc Go plugin.
```
$ go get -a github.com/golang/protobuf/protoc-gen-go
$
$ # from this dir; invoke protoc
$ protoc -I ./helloworld/helloworld/ ./helloworld/helloworld/helloworld.proto --go_out=plugins=grpc:helloworld
```

431
vendor/google.golang.org/grpc/examples/gotutorial.md generated vendored Normal file
View file

@ -0,0 +1,431 @@
# gRPC Basics: Go
This tutorial provides a basic Go programmer's introduction to working with gRPC. By walking through this example you'll learn how to:
- Define a service in a .proto file.
- Generate server and client code using the protocol buffer compiler.
- Use the Go gRPC API to write a simple client and server for your service.
It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, which is currently in alpha release: you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers GitHub repository.
This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon.
## Why use gRPC?
Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients.
With gRPC we can define our service once in a .proto file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating.
## Example code and setup
The example code for our tutorial is in [grpc/grpc-go/examples/route_guide](https://github.com/grpc/grpc-go/tree/master/examples/route_guide). To download the example, clone the `grpc-go` repository by running the following command:
```shell
$ go get google.golang.org/grpc
```
Then change your current directory to `grpc-go/examples/route_guide`:
```shell
$ cd $GOPATH/src/google.golang.org/grpc/examples/route_guide
```
You also should have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Go quick start guide](examples/).
## Defining the service
Our first step (as you'll know from the [quick start](http://www.grpc.io/docs/#quick-start)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). You can see the complete .proto file in [`examples/route_guide/proto/route_guide.proto`](examples/route_guide/proto/route_guide.proto).
To define a service, you specify a named `service` in your .proto file:
```proto
service RouteGuide {
...
}
```
Then you define `rpc` methods inside your service definition, specifying their request and response types. gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service:
- A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call.
```proto
// Obtains the feature at a given position.
rpc GetFeature(Point) returns (Feature) {}
```
- A *server-side streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in our example, you specify a server-side streaming method by placing the `stream` keyword before the *response* type.
```proto
// Obtains the Features available within the given Rectangle. Results are
// streamed rather than returned at once (e.g. in a response message with a
// repeated field), as the rectangle may cover a large area and contain a
// huge number of features.
rpc ListFeatures(Rectangle) returns (stream Feature) {}
```
- A *client-side streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a client-side streaming method by placing the `stream` keyword before the *request* type.
```proto
// Accepts a stream of Points on a route being traversed, returning a
// RouteSummary when traversal is completed.
rpc RecordRoute(stream Point) returns (RouteSummary) {}
```
- A *bidirectional streaming RPC* where both sides send a sequence of messages using a read-write stream. The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response.
```proto
// Accepts a stream of RouteNotes sent while a route is being traversed,
// while receiving other RouteNotes (e.g. from other users).
rpc RouteChat(stream RouteNote) returns (stream RouteNote) {}
```
Our .proto file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type:
```proto
// Points are represented as latitude-longitude pairs in the E7 representation
// (degrees multiplied by 10**7 and rounded to the nearest integer).
// Latitudes should be in the range +/- 90 degrees and longitude should be in
// the range +/- 180 degrees (inclusive).
message Point {
int32 latitude = 1;
int32 longitude = 2;
}
```
## Generating client and server code
Next we need to generate the gRPC client and server interfaces from our .proto service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin.
For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first):
```shell
$ codegen.sh route_guide.proto
```
which actually runs:
```shell
$ protoc --go_out=plugins=grpc:. route_guide.proto
```
Running this command generates the following file in your current directory:
- `route_guide.pb.go`
This contains:
- All the protocol buffer code to populate, serialize, and retrieve our request and response message types
- An interface type (or *stub*) for clients to call with the methods defined in the `RouteGuide` service.
- An interface type for servers to implement, also with the methods defined in the `RouteGuide` service (both interfaces are sketched just below).
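For orientation, the two generated interfaces look roughly like this (a sketch reconstructed from the method signatures used later in this tutorial; the authoritative definitions live in the generated `route_guide.pb.go`):
```go
type RouteGuideClient interface {
	GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error)
	ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error)
	RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error)
	RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error)
}

type RouteGuideServer interface {
	GetFeature(context.Context, *Point) (*Feature, error)
	ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error
	RecordRoute(RouteGuide_RecordRouteServer) error
	RouteChat(RouteGuide_RouteChatServer) error
}
```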
<a name="server"></a>
## Creating the server
First let's look at how we create a `RouteGuide` server. If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!).
There are two parts to making our `RouteGuide` service do its job:
- Implementing the service interface generated from our service definition: doing the actual "work" of our service.
- Running a gRPC server to listen for requests from clients and dispatch them to the right service implementation.
You can find our example `RouteGuide` server in [grpc-go/examples/route_guide/server/server.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/server/server.go). Let's take a closer look at how it works.
### Implementing RouteGuide
As you can see, our server has a `routeGuideServer` struct type that implements the generated `RouteGuideServer` interface:
```go
type routeGuideServer struct {
...
}
...
func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) {
...
}
...
func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error {
...
}
...
func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error {
...
}
...
func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error {
...
}
...
```
#### Simple RPC
`routeGuideServer` implements all our service methods. Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`.
```go
func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) {
for _, feature := range s.savedFeatures {
if proto.Equal(feature.Location, point) {
return feature, nil
}
}
// No feature was found, return an unnamed feature
return &pb.Feature{"", point}, nil
}
```
The method is passed a context object for the RPC and the client's `Point` protocol buffer request. It returns a `Feature` protocol buffer object with the response information and an `error`. In the method we populate the `Feature` with the appropriate information, and then `return` it along with a `nil` error to tell gRPC that we've finished dealing with the RPC and that the `Feature` can be returned to the client.
#### Server-side streaming RPC
Now let's look at one of our streaming RPCs. `ListFeatures` is a server-side streaming RPC, so we need to send back multiple `Feature`s to our client.
```go
func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error {
for _, feature := range s.savedFeatures {
if inRange(feature.Location, rect) {
if err := stream.Send(feature); err != nil {
return err
}
}
}
return nil
}
```
As you can see, instead of getting simple request and response objects in our method parameters, this time we get a request object (the `Rectangle` in which our client wants to find `Feature`s) and a special `RouteGuide_ListFeaturesServer` object to write our responses.
In the method, we populate as many `Feature` objects as we need to return, writing them to the `RouteGuide_ListFeaturesServer` using its `Send()` method. Finally, as in our simple RPC, we return a `nil` error to tell gRPC that we've finished writing responses. Should any error happen in this call, we return a non-`nil` error; the gRPC layer will translate it into an appropriate RPC status to be sent on the wire.
#### Client-side streaming RPC
Now let's look at something a little more complicated: the client-side streaming method `RecordRoute`, where we get a stream of `Point`s from the client and return a single `RouteSummary` with information about their trip. As you can see, this time the method doesn't have a request parameter at all. Instead, it gets a `RouteGuide_RecordRouteServer` stream, which the server can use to both read *and* write messages - it can receive client messages using its `Recv()` method and return its single response using its `SendAndClose()` method.
```go
func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error {
var pointCount, featureCount, distance int32
var lastPoint *pb.Point
startTime := time.Now()
for {
point, err := stream.Recv()
if err == io.EOF {
endTime := time.Now()
return stream.SendAndClose(&pb.RouteSummary{
PointCount: pointCount,
FeatureCount: featureCount,
Distance: distance,
ElapsedTime: int32(endTime.Sub(startTime).Seconds()),
})
}
if err != nil {
return err
}
pointCount++
for _, feature := range s.savedFeatures {
if proto.Equal(feature.Location, point) {
featureCount++
}
}
if lastPoint != nil {
distance += calcDistance(lastPoint, point)
}
lastPoint = point
}
}
```
In the method body we use the `RouteGuide_RecordRouteServer`'s `Recv()` method to repeatedly read in our client's requests to a request object (in this case a `Point`) until there are no more messages: the server needs to check the error returned from `Recv()` after each call. If this is `nil`, the stream is still good and it can continue reading; if it's `io.EOF` the message stream has ended and the server can return its `RouteSummary`. If it has any other value, we return the error "as is" so that it'll be translated to an RPC status by the gRPC layer.
#### Bidirectional streaming RPC
Finally, let's look at our bidirectional streaming RPC `RouteChat()`.
```go
func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error {
for {
in, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
key := serialize(in.Location)
... // look for notes to be sent to client
for _, note := range s.routeNotes[key] {
if err := stream.Send(note); err != nil {
return err
}
}
}
}
```
This time we get a `RouteGuide_RouteChatServer` stream that, as in our client-side streaming example, can be used to read and write messages. However, this time we return values via our method's stream while the client is still writing messages to *their* message stream.
The syntax for reading and writing here is very similar to our client-streaming method, except the server uses the stream's `Send()` method rather than `SendAndClose()` because it's writing multiple responses. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently.
### Starting the server
Once we've implemented all our methods, we also need to start up a gRPC server so that clients can actually use our service. The following snippet shows how we do this for our `RouteGuide` service:
```go
flag.Parse()
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
grpcServer := grpc.NewServer()
pb.RegisterRouteGuideServer(grpcServer, &routeGuideServer{})
... // determine whether to use TLS
grpcServer.Serve(lis)
```
To build and start a server, we:
1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))`.
2. Create an instance of the gRPC server using `grpc.NewServer()`.
3. Register our service implementation with the gRPC server.
4. Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called.
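The `... // determine whether to use TLS` step is elided in the snippet above; if you do want a TLS-enabled server, that step typically looks something like the following sketch (it assumes `certFile` and `keyFile` variables holding certificate and key paths, plus the `google.golang.org/grpc/credentials` import):
```go
creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
if err != nil {
	log.Fatalf("failed to load TLS credentials: %v", err)
}
grpcServer := grpc.NewServer(grpc.Creds(creds))
```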
<a name="client"></a>
## Creating the client
In this section, we'll look at creating a Go client for our `RouteGuide` service. You can see our complete example client code in [grpc-go/examples/route_guide/client/client.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/client/client.go).
### Creating a stub
To call service methods, we first need to create a gRPC *channel* to communicate with the server. We create this by passing the server address and port number to `grpc.Dial()` as follows:
```go
conn, err := grpc.Dial(*serverAddr)
if err != nil {
...
}
defer conn.Close()
```
You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service.
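For example, a dial that enables TLS transport credentials might look roughly like this (a sketch only, not needed for the plain `RouteGuide` example; it assumes a CA certificate file `ca.pem` and the `google.golang.org/grpc/credentials` import):
```go
creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
if err != nil {
	log.Fatalf("failed to load CA certificate: %v", err)
}
conn, err := grpc.Dial(*serverAddr, grpc.WithTransportCredentials(creds))
if err != nil {
	log.Fatalf("failed to dial: %v", err)
}
defer conn.Close()
```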
Once the gRPC *channel* is set up, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our .proto.
```go
client := pb.NewRouteGuideClient(conn)
```
### Calling service methods
Now let's look at how we call our service methods. Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode, which means that the RPC call waits for the server to respond, and will either return a response or an error.
#### Simple RPC
Calling the simple RPC `GetFeature` is nearly as straightforward as calling a local method.
```go
feature, err := client.GetFeature(context.Background(), &pb.Point{409146138, -746188906})
if err != nil {
...
}
```
As you can see, we call the method on the stub we got earlier. In our method parameters we create and populate a request protocol buffer object (in our case `Point`). We also pass a `context.Context` object, which lets us change our RPC's behaviour if necessary, such as timing out or cancelling an RPC in flight (a deadline example is sketched after the snippet below). If the call doesn't return an error, then we can read the response information from the server from the first return value.
```go
log.Println(feature)
```
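For instance, a call bounded by a deadline might look roughly like this (a sketch: it reuses the `client` and `pb` identifiers from above, picks an arbitrary 10-second limit, and also needs the `time` package):
```go
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
feature, err := client.GetFeature(ctx, &pb.Point{Latitude: 409146138, Longitude: -746188906})
if err != nil {
	log.Fatalf("GetFeature failed: %v", err)
}
log.Println(feature)
```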
#### Server-side streaming RPC
Here's where we call the server-side streaming method `ListFeatures`, which returns a stream of geographical `Feature`s. If you've already read [Creating the server](#server) some of this may look very familiar - streaming RPCs are implemented in a similar way on both sides.
```go
rect := &pb.Rectangle{ ... } // initialize a pb.Rectangle
stream, err := client.ListFeatures(context.Background(), rect)
if err != nil {
...
}
for {
feature, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("%v.ListFeatures(_) = _, %v", client, err)
}
log.Println(feature)
}
```
As in the simple RPC, we pass the method a context and a request. However, instead of getting a response object back, we get back an instance of `RouteGuide_ListFeaturesClient`. The client can use the `RouteGuide_ListFeaturesClient` stream to read the server's responses.
We use the `RouteGuide_ListFeaturesClient`'s `Recv()` method to repeatedly read in the server's responses to a response protocol buffer object (in this case a `Feature`) until there are no more messages: the client needs to check the error `err` returned from `Recv()` after each call. If `nil`, the stream is still good and it can continue reading; if it's `io.EOF` then the message stream has ended; otherwise there must be an RPC error, which is passed over through `err`.
#### Client-side streaming RPC
The client-side streaming method `RecordRoute` is similar to the server-side method, except that we only pass the method a context and get a `RouteGuide_RecordRouteClient` stream back, which we can use to both write *and* read messages.
```go
// Create a random number of random points
r := rand.New(rand.NewSource(time.Now().UnixNano()))
pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points
var points []*pb.Point
for i := 0; i < pointCount; i++ {
points = append(points, randomPoint(r))
}
log.Printf("Traversing %d points.", len(points))
stream, err := client.RecordRoute(context.Background())
if err != nil {
log.Fatalf("%v.RecordRoute(_) = _, %v", client, err)
}
for _, point := range points {
if err := stream.Send(point); err != nil {
log.Fatalf("%v.Send(%v) = %v", stream, point, err)
}
}
reply, err := stream.CloseAndRecv()
if err != nil {
log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
}
log.Printf("Route summary: %v", reply)
```
The `RouteGuide_RecordRouteClient` has a `Send()` method that we can use to send requests to the server. Once we've finished writing our client's requests to the stream using `Send()`, we need to call `CloseAndRecv()` on the stream to let gRPC know that we've finished writing and are expecting to receive a response. We get our RPC status from the `err` returned from `CloseAndRecv()`. If the status is `nil`, then the first return value from `CloseAndRecv()` will be a valid server response.
#### Bidirectional streaming RPC
Finally, let's look at our bidirectional streaming RPC `RouteChat()`. As in the case of `RecordRoute`, we only pass the method a context object and get back a stream that we can use to both write and read messages. However, this time we return values via our method's stream while the server is still writing messages to *their* message stream.
```go
stream, err := client.RouteChat(context.Background())
waitc := make(chan struct{})
go func() {
for {
in, err := stream.Recv()
if err == io.EOF {
// read done.
close(waitc)
return
}
if err != nil {
log.Fatalf("Failed to receive a note : %v", err)
}
log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude)
}
}()
for _, note := range notes {
if err := stream.Send(note); err != nil {
log.Fatalf("Failed to send a note: %v", err)
}
}
stream.CloseSend()
<-waitc
```
The syntax for reading and writing here is very similar to our client-side streaming method, except we use the stream's `CloseSend()` method once we've finished our call. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently.
## Try it out!
To compile and run the server, assuming you are in the folder
`$GOPATH/src/google.golang.org/grpc/examples/route_guide`, simply:
```sh
$ go run server/server.go
```
Likewise, to run the client:
```sh
$ go run client/client.go
```

View file

@ -0,0 +1,69 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package main
import (
"log"
"os"
"golang.org/x/net/context"
"google.golang.org/grpc"
pb "google.golang.org/grpc/examples/helloworld/helloworld"
)
const (
address = "localhost:50051"
defaultName = "world"
)
func main() {
// Set up a connection to the server.
conn, err := grpc.Dial(address, grpc.WithInsecure())
if err != nil {
log.Fatalf("did not connect: %v", err)
}
defer conn.Close()
c := pb.NewGreeterClient(conn)
// Contact the server and print out its response.
name := defaultName
if len(os.Args) > 1 {
name = os.Args[1]
}
r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name})
if err != nil {
log.Fatalf("could not greet: %v", err)
}
log.Printf("Greeting: %s", r.Message)
}

View file

@ -0,0 +1,65 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package main
import (
"log"
"net"
"golang.org/x/net/context"
"google.golang.org/grpc"
pb "google.golang.org/grpc/examples/helloworld/helloworld"
)
const (
port = ":50051"
)
// server is used to implement helloworld.GreeterServer.
type server struct{}
// SayHello implements helloworld.GreeterServer
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
return &pb.HelloReply{Message: "Hello " + in.Name}, nil
}
func main() {
lis, err := net.Listen("tcp", port)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
pb.RegisterGreeterServer(s, &server{})
s.Serve(lis)
}

View file

@ -0,0 +1,151 @@
// Code generated by protoc-gen-go.
// source: helloworld.proto
// DO NOT EDIT!
/*
Package helloworld is a generated protocol buffer package.
It is generated from these files:
helloworld.proto
It has these top-level messages:
HelloRequest
HelloReply
*/
package helloworld
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The request message containing the user's name.
type HelloRequest struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *HelloRequest) Reset() { *m = HelloRequest{} }
func (m *HelloRequest) String() string { return proto.CompactTextString(m) }
func (*HelloRequest) ProtoMessage() {}
func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
// The response message containing the greetings
type HelloReply struct {
Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
}
func (m *HelloReply) Reset() { *m = HelloReply{} }
func (m *HelloReply) String() string { return proto.CompactTextString(m) }
func (*HelloReply) ProtoMessage() {}
func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func init() {
proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest")
proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion3
// Client API for Greeter service
type GreeterClient interface {
// Sends a greeting
SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error)
}
type greeterClient struct {
cc *grpc.ClientConn
}
func NewGreeterClient(cc *grpc.ClientConn) GreeterClient {
return &greeterClient{cc}
}
func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) {
out := new(HelloReply)
err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Greeter service
type GreeterServer interface {
// Sends a greeting
SayHello(context.Context, *HelloRequest) (*HelloReply, error)
}
func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) {
s.RegisterService(&_Greeter_serviceDesc, srv)
}
func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HelloRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GreeterServer).SayHello(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/helloworld.Greeter/SayHello",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Greeter_serviceDesc = grpc.ServiceDesc{
ServiceName: "helloworld.Greeter",
HandlerType: (*GreeterServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "SayHello",
Handler: _Greeter_SayHello_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: fileDescriptor0,
}
func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 174 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9,
0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88,
0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42,
0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92,
0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71,
0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a,
0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64,
0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x00, 0xad, 0x50, 0x62, 0x70, 0x32, 0xe0, 0x92, 0xce, 0xcc, 0xd7,
0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0x46, 0x52, 0xeb,
0xc4, 0x0f, 0x56, 0x1c, 0x0e, 0x62, 0x07, 0x80, 0xbc, 0x14, 0xc0, 0x98, 0xc4, 0x06, 0xf6, 0x9b,
0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00,
}

View file

@ -0,0 +1,52 @@
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
option java_multiple_files = true;
option java_package = "io.grpc.examples.helloworld";
option java_outer_classname = "HelloWorldProto";
package helloworld;
// The greeting service definition.
service Greeter {
// Sends a greeting
rpc SayHello (HelloRequest) returns (HelloReply) {}
}
// The request message containing the user's name.
message HelloRequest {
string name = 1;
}
// The response message containing the greetings
message HelloReply {
string message = 1;
}

Some files were not shown because too many files have changed in this diff Show more