Merge pull request #910 from ericchiang/update-grpc

*: update grpc and correct protobuf generation
Eric Chiang 2017-04-13 13:18:55 -07:00 committed by GitHub
commit 5859fe1091
102 changed files with 28376 additions and 1776 deletions

Makefile

@@ -16,10 +16,6 @@ user=$(shell id -u -n)
 group=$(shell id -g -n)
 
 export GOBIN=$(PWD)/bin
-# Prefer ./bin instead of system packages for things like protoc, where we want
-# to use the version dex uses, not whatever a developer has installed.
-export PATH=$(GOBIN):$(shell printenv PATH)
-export GO15VENDOREXPERIMENT=1
 
 LD_FLAGS="-w -X $(REPO_PATH)/version.Version=$(VERSION)"
 
@@ -74,10 +70,10 @@ docker-image: clean-release _output/bin/dex
 proto: api/api.pb.go server/internal/types.pb.go
 
 api/api.pb.go: api/api.proto bin/protoc bin/protoc-gen-go
-	@protoc --go_out=plugins=grpc:. api/*.proto
+	@./bin/protoc --go_out=plugins=grpc:. --plugin=protoc-gen-go=./bin/protoc-gen-go api/*.proto
 
 server/internal/types.pb.go: server/internal/types.proto bin/protoc bin/protoc-gen-go
-	@protoc --go_out=. server/internal/*.proto
+	@./bin/protoc --go_out=. --plugin=protoc-gen-go=./bin/protoc-gen-go server/internal/*.proto
 
 bin/protoc: scripts/get-protoc
 	@./scripts/get-protoc bin/protoc

api/api.pb.go

@@ -353,7 +353,7 @@ var _ grpc.ClientConn
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion3
+const _ = grpc.SupportPackageIsVersion4
 
 // Client API for Dex service
 
@@ -702,14 +702,14 @@ var _Dex_serviceDesc = grpc.ServiceDesc{
 		},
 	},
 	Streams:  []grpc.StreamDesc{},
-	Metadata: fileDescriptor0,
+	Metadata: "api/api.proto",
 }
 
 func init() { proto.RegisterFile("api/api.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
 	// 786 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x55, 0x6d, 0x4f, 0xdb, 0x48,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x6d, 0x4f, 0xdb, 0x48,
 	0x10, 0x4e, 0xe2, 0x90, 0x38, 0x93, 0xf7, 0x3d, 0x5e, 0x4c, 0xd0, 0x49, 0xb0, 0xe8, 0x24, 0xd0,
 	0x49, 0x70, 0x70, 0xd2, 0x9d, 0x74, 0xe8, 0xb8, 0x3b, 0xc1, 0xb5, 0x20, 0x55, 0x15, 0xb2, 0x9a,
 	0x7e, 0xac, 0x65, 0xe2, 0x01, 0x56, 0x18, 0xdb, 0xdd, 0xdd, 0x10, 0xda, 0x7f, 0x57, 0xf5, 0x8f,
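The regenerated file keeps the same client surface; only the descriptor bytes and the version assertion change. A minimal sketch of consuming these bindings (hedged: the github.com/coreos/dex/api import path, the CreateClient RPC, and the insecure local address are assumptions for illustration, not part of this diff):

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/coreos/dex/api"
)

func main() {
	// grpc.WithInsecure is for illustration only; a real deployment would
	// dial dex's gRPC API over TLS.
	conn, err := grpc.Dial("127.0.0.1:5557", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := api.NewDexClient(conn)
	resp, err := client.CreateClient(context.Background(), &api.CreateClientReq{
		Client: &api.Client{Id: "example-app", Name: "Example App"},
	})
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	log.Printf("created client: %v", resp.Client)
}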

glide.lock (generated)

@@ -1,5 +1,5 @@
-hash: ba77a77f03b1750479aa4a61de59d7a545dc8bd5c71be10e1c2e9afed37e1925
-updated: 2017-03-24T11:03:21.332291207-07:00
+hash: 94ea60e268ee0ed04e8affdee4db2884fd93ee68ed625d88ccc0e24565b98569
+updated: 2017-04-13T11:28:49.008994259-07:00
 imports:
 - name: github.com/beevik/etree
   version: 4cd0dd976db869f817248477718071a28e978df0
@@ -14,10 +14,12 @@ imports:
 - name: github.com/go-sql-driver/mysql
   version: 0b58b37b664c21f3010e836f1b931e1d0b0b0685
 - name: github.com/golang/protobuf
-  version: 874264fbbb43f4d91e999fecb4b40143ed611400
+  version: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
   subpackages:
   - proto
   - protoc-gen-go
+  - protoc-gen-go/grpc
+  - ptypes/any
 - name: github.com/gorilla/context
   version: aed02d124ae4a0e94fea4541c8effd05bf0c8296
 - name: github.com/gorilla/handlers
@@ -62,11 +64,12 @@ imports:
   - bcrypt
   - blowfish
 - name: golang.org/x/net
-  version: 6a513affb38dc9788b449d59ffed099b8de18fa0
+  version: 5602c733f70afc6dcec6766be0d5034d4c4f14de
   subpackages:
   - context
   - http2
   - http2/hpack
+  - idna
   - internal/timeseries
   - lex/httplex
   - trace
@@ -79,6 +82,13 @@ imports:
   version: 833a04a10549a95dc34458c195cbad61bbb6cb4d
   subpackages:
   - unix
+- name: golang.org/x/text
+  version: f4b4367115ec2de254587813edaa901bc1c723a8
+  subpackages:
+  - secure/bidirule
+  - transform
+  - unicode/bidi
+  - unicode/norm
 - name: google.golang.org/appengine
   version: 267c27e7492265b84fc6719503b14a1e17975d79
   subpackages:
@@ -89,16 +99,24 @@ imports:
   - internal/remote_api
   - internal/urlfetch
   - urlfetch
+- name: google.golang.org/genproto
+  version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
+  subpackages:
+  - googleapis/rpc/status
 - name: google.golang.org/grpc
-  version: b1a2821ca5a4fd6b6e48ddfbb7d6d7584d839d21
+  version: 0e8b58d22f34640cb17dbbed1c8aef3b8dcc0e97
   subpackages:
   - codes
   - credentials
   - grpclog
   - internal
+  - keepalive
   - metadata
   - naming
   - peer
+  - stats
+  - status
+  - tap
   - transport
 - name: gopkg.in/asn1-ber.v1
   version: 4e86f4367175e39f69d9358a5f17b4dda270378d

glide.yaml

@@ -32,7 +32,7 @@ import:
 
 # Imported directly and by several third party packages.
 - package: golang.org/x/net
-  version: 6a513affb38dc9788b449d59ffed099b8de18fa0
+  version: 5602c733f70afc6dcec6766be0d5034d4c4f14de
   subpackages:
   - context
   - http2
@@ -40,6 +40,8 @@ import:
   - internal/timeseries
   - lex/httplex
   - trace
+- package: golang.org/x/text
+  version: f4b4367115ec2de254587813edaa901bc1c723a8
 
 # Used for parsing configs.
 - package: github.com/ghodss/yaml
@@ -109,7 +111,7 @@ import:
 
 # gRPC and protobuf are use for the API. Also import x/net/http2 stack.
 - package: google.golang.org/grpc
-  version: b1a2821ca5a4fd6b6e48ddfbb7d6d7584d839d21
+  version: 0e8b58d22f34640cb17dbbed1c8aef3b8dcc0e97
   subpackages:
   - codes
   - credentials
@@ -120,10 +122,13 @@ import:
   - peer
   - transport
 - package: github.com/golang/protobuf
-  version: 874264fbbb43f4d91e999fecb4b40143ed611400
+  version: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
   subpackages:
   - proto
   - protoc-gen-go
+  - protoc-gen-go/grpc
+- package: google.golang.org/genproto
+  version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
 
 # Structured logging
 - package: github.com/Sirupsen/logrus

server/internal/types.pb.go

@@ -63,7 +63,7 @@ func init() { proto.RegisterFile("server/internal/types.proto", fileDescriptor0)
 
 var fileDescriptor0 = []byte{
 	// 157 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x2e, 0x4e, 0x2d, 0x2a,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2e, 0x4e, 0x2d, 0x2a,
 	0x4b, 0x2d, 0xd2, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x2f, 0xa9, 0x2c, 0x48,
 	0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0x89, 0x2a, 0x39, 0x73, 0xf1, 0x04,
 	0xa5, 0xa6, 0x15, 0xa5, 0x16, 0x67, 0x84, 0xe4, 0x67, 0xa7, 0xe6, 0x09, 0xc9, 0x72, 0x71, 0x15,

vendor/github.com/golang/protobuf/proto/decode.go

@@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
 // int32, int64, uint32, uint64, bool, and enum
 // protocol buffer types.
 func DecodeVarint(buf []byte) (x uint64, n int) {
-	// x, n already 0
 	for shift := uint(0); shift < 64; shift += 7 {
 		if n >= len(buf) {
 			return 0, 0
@@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
 	return 0, 0
 }
 
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	// x, err already 0
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
 	i := p.index
 	l := len(p.buf)
@@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
 	return
 }
 
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
 // DecodeFixed64 reads a 64-bit integer from the Buffer.
 // This is the format for the
 // fixed64, sfixed64, and double protocol buffer types.
@@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
 // Buffer and places the decoded result in pb. If the struct
 // underlying pb does not match the data in the buffer, the results can be
 // unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 func (p *Buffer) Unmarshal(pb Message) error {
 	// If the object can unmarshal itself, let it.
 	if u, ok := pb.(Unmarshaler); ok {
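The new fast path unrolls the varint loop one byte per step and punts to decodeVarintSlow only near the end of the buffer. A self-contained sketch of the same wire format through the package's exported helpers (only the public proto API is used here):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b100101100 encodes as two bytes: 0xac (low 7 bits plus a
	// continuation bit) followed by 0x02.
	buf := proto.EncodeVarint(300)
	fmt.Printf("% x\n", buf) // ac 02

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2
}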

vendor/github.com/golang/protobuf/proto/encode.go

@@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) {
 	}
 	p := NewBuffer(nil)
 	err := p.Marshal(pb)
-	var state errorState
-	if err != nil && !state.shouldContinue(err, nil) {
-		return nil, err
-	}
 	if p.buf == nil && err == nil {
 		// Return a non-nil slice on success.
 		return []byte{}, nil
@@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error {
 	// Can the object marshal itself?
 	if m, ok := pb.(Marshaler); ok {
 		data, err := m.Marshal()
-		if err != nil {
-			return err
-		}
 		p.buf = append(p.buf, data...)
-		return nil
+		return err
 	}
 
 	t, base, err := getbase(pb)
@@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error {
 	}
 
 	if collectStats {
-		stats.Encode++
+		(stats).Encode++ // Parens are to work around a goimports bug.
 	}
 
 	if len(p.buf) > maxMarshalSize {
@@ -309,7 +302,7 @@ func Size(pb Message) (n int) {
 	}
 
 	if collectStats {
-		stats.Size++
+		(stats).Size++ // Parens are to work around a goimports bug.
 	}
 
 	return
@@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
 		if p.isMarshaler {
 			m := structPointer_Interface(structp, p.stype).(Marshaler)
 			data, _ := m.Marshal()
-			n += len(p.tagcode)
 			n += sizeRawBytes(data)
 			continue
 		}
@@ -1083,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
 
 func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
 	exts := structPointer_Extensions(base, p.field)
-	if err := encodeExtensions(exts); err != nil {
+
+	v, mu := exts.extensionsRead()
+	if v == nil {
+		return nil
+	}
+
+	mu.Lock()
+	defer mu.Unlock()
+	if err := encodeExtensionsMap(v); err != nil {
 		return err
 	}
-	v, _ := exts.extensionsRead()
 
 	return o.enc_map_body(v)
 }

vendor/github.com/golang/protobuf/proto/equal.go

@@ -54,13 +54,17 @@ Equality is defined in this way:
     in a proto3 .proto file, fields are not "set"; specifically,
     zero length proto3 "bytes" fields are equal (nil == {}).
   - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal (a "bytes" field,
-    although represented by []byte, is not a repeated field)
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
   - Two unset fields are equal.
   - Two unknown field sets are equal if their current
     encoded state is equal.
   - Two extension sets are equal iff they have corresponding
     elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
   - Every other combination of things are not equal.
 
 The return value is undefined if a and b are not protocol buffers.

vendor/github.com/golang/protobuf/proto/extensions.go

@@ -154,6 +154,7 @@ type ExtensionDesc struct {
 	Field         int32  // field number
 	Name          string // fully-qualified name of extension, for text formatting
 	Tag           string // protobuf tag style
+	Filename      string // name of the file in which the extension is defined
 }
 
 func (ed *ExtensionDesc) repeated() bool {
@@ -500,6 +501,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
 	registeredExtensions := RegisteredExtensions(pb)
 
 	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
 	mu.Lock()
 	defer mu.Unlock()
 	extensions := make([]*ExtensionDesc, 0, len(emap))

vendor/github.com/golang/protobuf/proto/lib.go

@@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
 // temporary Buffer and are fine for most applications.
 type Buffer struct {
 	buf   []byte // encode/decode byte stream
-	index int    // write point
+	index int    // read point
 
 	// pools of basic types to amortize allocation.
 	bools   []bool

vendor/github.com/golang/protobuf/proto/properties.go

@@ -844,7 +844,15 @@ func RegisterType(x Message, name string) {
 }
 
 // MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
 
 // MessageType returns the message type (pointer to struct) for a named message.
 func MessageType(name string) reflect.Type { return protoTypes[name] }

vendor/github.com/golang/protobuf/proto/text_parser.go

@@ -44,6 +44,9 @@ import (
 	"unicode/utf8"
 )
 
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
 type ParseError struct {
 	Message string
 	Line    int // 1-based line number
@@ -508,8 +511,16 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 			if err != nil {
 				return p.errorf("failed to marshal message of type %q: %v", messageName, err)
 			}
+			if fieldSet["type_url"] {
+				return p.errorf(anyRepeatedlyUnpacked, "type_url")
+			}
+			if fieldSet["value"] {
+				return p.errorf(anyRepeatedlyUnpacked, "value")
+			}
 			sv.FieldByName("TypeUrl").SetString(extName)
 			sv.FieldByName("Value").SetBytes(b)
+			fieldSet["type_url"] = true
+			fieldSet["value"] = true
 			continue
 		}
@@ -581,7 +592,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 				props = oop.Prop
 				nv := reflect.New(oop.Type.Elem())
 				dst = nv.Elem().Field(0)
-				sv.Field(oop.Field).Set(nv)
+				field := sv.Field(oop.Field)
+				if !field.IsNil() {
+					return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+				}
+				field.Set(nv)
 			}
 			if !dst.IsValid() {
 				return p.errorf("unknown field name %q in %v", name, st)
@@ -781,12 +796,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
 		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
 		return p.readAny(fv.Index(fv.Len()-1), props)
 	case reflect.Bool:
-		// Either "true", "false", 1 or 0.
+		// true/1/t/True or false/f/0/False.
 		switch tok.value {
-		case "true", "1":
+		case "true", "1", "t", "True":
 			fv.SetBool(true)
 			return nil
-		case "false", "0":
+		case "false", "0", "f", "False":
 			fv.SetBool(false)
 			return nil
 		}
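The readAny change widens text-format booleans to accept t/True and f/False. A minimal sketch of the effect (the Flag type is a hypothetical stand-in for a protoc-generated message):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Flag is a hypothetical message type; generated code would normally
// provide this struct and its protobuf tags.
type Flag struct {
	Enabled bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"`
}

func (m *Flag) Reset()         { *m = Flag{} }
func (m *Flag) String() string { return proto.CompactTextString(m) }
func (*Flag) ProtoMessage()    {}

func main() {
	var f Flag
	// "True" parses only after this change; the old parser accepted
	// just "true" and "1".
	if err := proto.UnmarshalText(`enabled: True`, &f); err != nil {
		panic(err)
	}
	fmt.Println(f.Enabled) // true
}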

vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go (vendored, new file)

@@ -0,0 +1,463 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2015 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package grpc outputs gRPC service descriptions in Go code.
// It runs as a plugin for the Go protocol buffer compiler plugin.
// It is linked in to protoc-gen-go.
package grpc
import (
"fmt"
"path"
"strconv"
"strings"
pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"github.com/golang/protobuf/protoc-gen-go/generator"
)
// generatedCodeVersion indicates a version of the generated code.
// It is incremented whenever an incompatibility between the generated code and
// the grpc package is introduced; the generated code references
// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
const generatedCodeVersion = 4
// Paths for packages used by code generated in this file,
// relative to the import_prefix of the generator.Generator.
const (
contextPkgPath = "golang.org/x/net/context"
grpcPkgPath = "google.golang.org/grpc"
)
func init() {
generator.RegisterPlugin(new(grpc))
}
// grpc is an implementation of the Go protocol buffer compiler's
// plugin architecture. It generates bindings for gRPC support.
type grpc struct {
gen *generator.Generator
}
// Name returns the name of this plugin, "grpc".
func (g *grpc) Name() string {
return "grpc"
}
// The names for packages imported in the generated code.
// They may vary from the final path component of the import path
// if the name is used by other packages.
var (
contextPkg string
grpcPkg string
)
// Init initializes the plugin.
func (g *grpc) Init(gen *generator.Generator) {
g.gen = gen
contextPkg = generator.RegisterUniquePackageName("context", nil)
grpcPkg = generator.RegisterUniquePackageName("grpc", nil)
}
// Given a type name defined in a .proto, return its object.
// Also record that we're using it, to guarantee the associated import.
func (g *grpc) objectNamed(name string) generator.Object {
g.gen.RecordTypeUse(name)
return g.gen.ObjectNamed(name)
}
// Given a type name defined in a .proto, return its name as we will print it.
func (g *grpc) typeName(str string) string {
return g.gen.TypeName(g.objectNamed(str))
}
// P forwards to g.gen.P.
func (g *grpc) P(args ...interface{}) { g.gen.P(args...) }
// Generate generates code for the services in the given file.
func (g *grpc) Generate(file *generator.FileDescriptor) {
if len(file.FileDescriptorProto.Service) == 0 {
return
}
g.P("// Reference imports to suppress errors if they are not otherwise used.")
g.P("var _ ", contextPkg, ".Context")
g.P("var _ ", grpcPkg, ".ClientConn")
g.P()
// Assert version compatibility.
g.P("// This is a compile-time assertion to ensure that this generated file")
g.P("// is compatible with the grpc package it is being compiled against.")
g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion)
g.P()
for i, service := range file.FileDescriptorProto.Service {
g.generateService(file, service, i)
}
}
// GenerateImports generates the import declaration for this file.
func (g *grpc) GenerateImports(file *generator.FileDescriptor) {
if len(file.FileDescriptorProto.Service) == 0 {
return
}
g.P("import (")
g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath)))
g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath)))
g.P(")")
g.P()
}
// reservedClientName records whether a client name is reserved on the client side.
var reservedClientName = map[string]bool{
// TODO: do we need any in gRPC?
}
func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
// generateService generates all the code for the named service.
func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
path := fmt.Sprintf("6,%d", index) // 6 means service.
origServName := service.GetName()
fullServName := origServName
if pkg := file.GetPackage(); pkg != "" {
fullServName = pkg + "." + fullServName
}
servName := generator.CamelCase(origServName)
g.P()
g.P("// Client API for ", servName, " service")
g.P()
// Client interface.
g.P("type ", servName, "Client interface {")
for i, method := range service.Method {
g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
g.P(g.generateClientSignature(servName, method))
}
g.P("}")
g.P()
// Client structure.
g.P("type ", unexport(servName), "Client struct {")
g.P("cc *", grpcPkg, ".ClientConn")
g.P("}")
g.P()
// NewClient factory.
g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {")
g.P("return &", unexport(servName), "Client{cc}")
g.P("}")
g.P()
var methodIndex, streamIndex int
serviceDescVar := "_" + servName + "_serviceDesc"
// Client method implementations.
for _, method := range service.Method {
var descExpr string
if !method.GetServerStreaming() && !method.GetClientStreaming() {
// Unary RPC method
descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
methodIndex++
} else {
// Streaming RPC method
descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
streamIndex++
}
g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
}
g.P("// Server API for ", servName, " service")
g.P()
// Server interface.
serverType := servName + "Server"
g.P("type ", serverType, " interface {")
for i, method := range service.Method {
g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
g.P(g.generateServerSignature(servName, method))
}
g.P("}")
g.P()
// Server registration.
g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
g.P("}")
g.P()
// Server handler implementations.
var handlerNames []string
for _, method := range service.Method {
hname := g.generateServerMethod(servName, fullServName, method)
handlerNames = append(handlerNames, hname)
}
// Service descriptor.
g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {")
g.P("ServiceName: ", strconv.Quote(fullServName), ",")
g.P("HandlerType: (*", serverType, ")(nil),")
g.P("Methods: []", grpcPkg, ".MethodDesc{")
for i, method := range service.Method {
if method.GetServerStreaming() || method.GetClientStreaming() {
continue
}
g.P("{")
g.P("MethodName: ", strconv.Quote(method.GetName()), ",")
g.P("Handler: ", handlerNames[i], ",")
g.P("},")
}
g.P("},")
g.P("Streams: []", grpcPkg, ".StreamDesc{")
for i, method := range service.Method {
if !method.GetServerStreaming() && !method.GetClientStreaming() {
continue
}
g.P("{")
g.P("StreamName: ", strconv.Quote(method.GetName()), ",")
g.P("Handler: ", handlerNames[i], ",")
if method.GetServerStreaming() {
g.P("ServerStreams: true,")
}
if method.GetClientStreaming() {
g.P("ClientStreams: true,")
}
g.P("},")
}
g.P("},")
g.P("Metadata: \"", file.GetName(), "\",")
g.P("}")
g.P()
}
// generateClientSignature returns the client-side signature for a method.
func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
origMethName := method.GetName()
methName := generator.CamelCase(origMethName)
if reservedClientName[methName] {
methName += "_"
}
reqArg := ", in *" + g.typeName(method.GetInputType())
if method.GetClientStreaming() {
reqArg = ""
}
respName := "*" + g.typeName(method.GetOutputType())
if method.GetServerStreaming() || method.GetClientStreaming() {
respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
}
return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName)
}
func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName())
methName := generator.CamelCase(method.GetName())
inType := g.typeName(method.GetInputType())
outType := g.typeName(method.GetOutputType())
g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{")
if !method.GetServerStreaming() && !method.GetClientStreaming() {
g.P("out := new(", outType, ")")
// TODO: Pass descExpr to Invoke.
g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`)
g.P("if err != nil { return nil, err }")
g.P("return out, nil")
g.P("}")
g.P()
return
}
streamType := unexport(servName) + methName + "Client"
g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`)
g.P("if err != nil { return nil, err }")
g.P("x := &", streamType, "{stream}")
if !method.GetClientStreaming() {
g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
}
g.P("return x, nil")
g.P("}")
g.P()
genSend := method.GetClientStreaming()
genRecv := method.GetServerStreaming()
genCloseAndRecv := !method.GetServerStreaming()
// Stream auxiliary types and methods.
g.P("type ", servName, "_", methName, "Client interface {")
if genSend {
g.P("Send(*", inType, ") error")
}
if genRecv {
g.P("Recv() (*", outType, ", error)")
}
if genCloseAndRecv {
g.P("CloseAndRecv() (*", outType, ", error)")
}
g.P(grpcPkg, ".ClientStream")
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPkg, ".ClientStream")
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", inType, ") error {")
g.P("return x.ClientStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {")
g.P("m := new(", outType, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
if genCloseAndRecv {
g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
g.P("m := new(", outType, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
}
// generateServerSignature returns the server-side signature for a method.
func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
origMethName := method.GetName()
methName := generator.CamelCase(origMethName)
if reservedClientName[methName] {
methName += "_"
}
var reqArgs []string
ret := "error"
if !method.GetServerStreaming() && !method.GetClientStreaming() {
reqArgs = append(reqArgs, contextPkg+".Context")
ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
}
if !method.GetClientStreaming() {
reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType()))
}
if method.GetServerStreaming() || method.GetClientStreaming() {
reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server")
}
return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
}
func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string {
methName := generator.CamelCase(method.GetName())
hname := fmt.Sprintf("_%s_%s_Handler", servName, methName)
inType := g.typeName(method.GetInputType())
outType := g.typeName(method.GetOutputType())
if !method.GetServerStreaming() && !method.GetClientStreaming() {
g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {")
g.P("in := new(", inType, ")")
g.P("if err := dec(in); err != nil { return nil, err }")
g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }")
g.P("info := &", grpcPkg, ".UnaryServerInfo{")
g.P("Server: srv,")
g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",")
g.P("}")
g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {")
g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))")
g.P("}")
g.P("return interceptor(ctx, in, info, handler)")
g.P("}")
g.P()
return hname
}
streamType := unexport(servName) + methName + "Server"
g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {")
if !method.GetClientStreaming() {
g.P("m := new(", inType, ")")
g.P("if err := stream.RecvMsg(m); err != nil { return err }")
g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})")
} else {
g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})")
}
g.P("}")
g.P()
genSend := method.GetServerStreaming()
genSendAndClose := !method.GetServerStreaming()
genRecv := method.GetClientStreaming()
// Stream auxiliary types and methods.
g.P("type ", servName, "_", methName, "Server interface {")
if genSend {
g.P("Send(*", outType, ") error")
}
if genSendAndClose {
g.P("SendAndClose(*", outType, ") error")
}
if genRecv {
g.P("Recv() (*", inType, ", error)")
}
g.P(grpcPkg, ".ServerStream")
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPkg, ".ServerStream")
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", outType, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genSendAndClose {
g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {")
g.P("m := new(", inType, ")")
g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
return hname
}
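The unary handlers emitted by generateServerMethod check for a nil interceptor and otherwise route the call through it with a grpc.UnaryServerInfo naming the full method. A minimal sketch of the other side of that contract (the logging interceptor and server wiring are illustrative, not part of the plugin):

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// loggingInterceptor satisfies grpc.UnaryServerInterceptor. The generated
// handler supplies info.FullMethod ("/package.Service/Method") and a
// closure that invokes the actual service method.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("rpc %s", info.FullMethod)
	return handler(ctx, req)
}

func newServer() *grpc.Server {
	// Registering a generated service (e.g. api.RegisterDexServer) on this
	// server routes every unary RPC through the interceptor above.
	return grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}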

vendor/github.com/golang/protobuf/ptypes/any/any.pb.go (generated, vendored, new file)

@@ -0,0 +1,155 @@
// Code generated by protoc-gen-go.
// source: github.com/golang/protobuf/ptypes/any/any.proto
// DO NOT EDIT!
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 187 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
}
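A minimal sketch of packing and unpacking an Any by hand, following the doc comment above (the Person type and its type URL come from the comment's JSON example and are hypothetical; the ptypes helpers or generated code would normally do this):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

// Person is a hypothetical stand-in for a generated message type.
type Person struct {
	FirstName string `protobuf:"bytes,1,opt,name=first_name,json=firstName" json:"first_name,omitempty"`
}

func (m *Person) Reset()         { *m = Person{} }
func (m *Person) String() string { return proto.CompactTextString(m) }
func (*Person) ProtoMessage()    {}

func main() {
	b, err := proto.Marshal(&Person{FirstName: "Ada"})
	if err != nil {
		panic(err)
	}
	// Pack conventions use "type.googleapis.com/<fully.qualified.Name>";
	// only the part after the last '/' identifies the type.
	a := &any.Any{TypeUrl: "type.googleapis.com/google.profile.Person", Value: b}

	var p Person
	if err := proto.Unmarshal(a.Value, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.FirstName) // Ada
}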

vendor/golang.org/x/net/context/context.go

@@ -7,7 +7,7 @@
 // and between processes.
 //
 // Incoming requests to a server should create a Context, and outgoing calls to
 // servers should accept a Context. The chain of function calls between must
 // propagate the Context, optionally replacing it with a modified copy created
 // using WithDeadline, WithTimeout, WithCancel, or WithValue.
 //
@@ -16,14 +16,14 @@
 // propagation:
 //
 // Do not store Contexts inside a struct type; instead, pass a Context
 // explicitly to each function that needs it. The Context should be the first
 // parameter, typically named ctx:
 //
 // 	func DoSomething(ctx context.Context, arg Arg) error {
 // 		// ... use ctx ...
 // 	}
 //
 // Do not pass a nil Context, even if a function permits it. Pass context.TODO
 // if you are unsure about which Context to use.
 //
 // Use context Values only for request-scoped data that transits processes and
@@ -44,13 +44,13 @@ import "time"
 // Context's methods may be called by multiple goroutines simultaneously.
 type Context interface {
 	// Deadline returns the time when work done on behalf of this context
 	// should be canceled. Deadline returns ok==false when no deadline is
 	// set. Successive calls to Deadline return the same results.
 	Deadline() (deadline time.Time, ok bool)
 
 	// Done returns a channel that's closed when work done on behalf of this
 	// context should be canceled. Done may return nil if this context can
 	// never be canceled. Successive calls to Done return the same value.
 	//
 	// WithCancel arranges for Done to be closed when cancel is called;
 	// WithDeadline arranges for Done to be closed when the deadline
@@ -79,24 +79,24 @@ type Context interface {
 	// a Done channel for cancelation.
 	Done() <-chan struct{}
 
 	// Err returns a non-nil error value after Done is closed. Err returns
 	// Canceled if the context was canceled or DeadlineExceeded if the
 	// context's deadline passed. No other values for Err are defined.
 	// After Done is closed, successive calls to Err return the same value.
 	Err() error
 
 	// Value returns the value associated with this context for key, or nil
 	// if no value is associated with key. Successive calls to Value with
 	// the same key returns the same result.
 	//
 	// Use context values only for request-scoped data that transits
 	// processes and API boundaries, not for passing optional parameters to
 	// functions.
 	//
 	// A key identifies a specific value in a Context. Functions that wish
 	// to store values in Context typically allocate a key in a global
 	// variable then use that key as the argument to context.WithValue and
 	// Context.Value. A key can be any type that supports equality;
 	// packages should define keys as an unexported type to avoid
 	// collisions.
 	//
@@ -115,7 +115,7 @@ type Context interface {
 	// // This prevents collisions with keys defined in other packages.
 	// type key int
 	//
 	// // userKey is the key for user.User values in Contexts. It is
 	// // unexported; clients use user.NewContext and user.FromContext
 	// // instead of using this key directly.
 	// var userKey key = 0
@@ -134,14 +134,14 @@
 }
 
 // Background returns a non-nil, empty Context. It is never canceled, has no
 // values, and has no deadline. It is typically used by the main function,
 // initialization, and tests, and as the top-level Context for incoming
 // requests.
 func Background() Context {
 	return background
 }
 
 // TODO returns a non-nil, empty Context. Code should use context.TODO when
 // it's unclear which Context to use or it is not yet available (because the
 // surrounding function has not yet been extended to accept a Context
 // parameter). TODO is recognized by static analysis tools that determine

vendor/golang.org/x/net/context/go17.go

@@ -35,8 +35,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
 }
 
 // WithDeadline returns a copy of the parent context with the deadline adjusted
 // to be no later than d. If the parent's deadline is already earlier than d,
 // WithDeadline(parent, d) is semantically equivalent to parent. The returned
 // context's Done channel is closed when the deadline expires, when the returned
 // cancel function is called, or when the parent context's Done channel is
 // closed, whichever happens first.

vendor/golang.org/x/net/context/pre_go17.go

@@ -13,7 +13,7 @@ import (
 	"time"
 )
 
 // An emptyCtx is never canceled, has no values, and has no deadline. It is not
 // struct{}, since vars of this type must have distinct addresses.
 type emptyCtx int
 
@@ -104,7 +104,7 @@ func propagateCancel(parent Context, child canceler) {
 }
 
 // parentCancelCtx follows a chain of parent references until it finds a
 // *cancelCtx. This function understands how each of the concrete types in this
 // package represents its parent.
 func parentCancelCtx(parent Context) (*cancelCtx, bool) {
 	for {
@@ -134,14 +134,14 @@ func removeChild(parent Context, child canceler) {
 	p.mu.Unlock()
 }
 
 // A canceler is a context type that can be canceled directly. The
 // implementations are *cancelCtx and *timerCtx.
 type canceler interface {
 	cancel(removeFromParent bool, err error)
 	Done() <-chan struct{}
 }
 
 // A cancelCtx can be canceled. When canceled, it also cancels any children
 // that implement canceler.
 type cancelCtx struct {
 	Context
@@ -193,8 +193,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
 }
 
 // WithDeadline returns a copy of the parent context with the deadline adjusted
 // to be no later than d. If the parent's deadline is already earlier than d,
 // WithDeadline(parent, d) is semantically equivalent to parent. The returned
 // context's Done channel is closed when the deadline expires, when the returned
 // cancel function is called, or when the parent context's Done channel is
 // closed, whichever happens first.
@@ -226,8 +226,8 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
 	return c, func() { c.cancel(true, Canceled) }
 }
 
 // A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
 // implement Done and Err. It implements cancel by stopping its timer then
 // delegating to cancelCtx.cancel.
 type timerCtx struct {
 	*cancelCtx
@@ -281,7 +281,7 @@ func WithValue(parent Context, key interface{}, val interface{}) Context {
 	return &valueCtx{parent, key, val}
 }
 
 // A valueCtx carries a key-value pair. It implements Value for that key and
 // delegates all other calls to the embedded Context.
 type valueCtx struct {
 	Context
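A minimal sketch of the WithDeadline/WithTimeout behavior documented above, using the vendored package (the timeout value is illustrative): the derived context's Done channel closes when the timer fires or cancel is called, whichever happens first.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel() // stops the timer if we return before the deadline

	select {
	case <-time.After(time.Second):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // context deadline exceeded
	}
}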

vendor/golang.org/x/net/http2/client_conn_pool.go

@@ -247,7 +247,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
 }
 
 // noDialClientConnPool is an implementation of http2.ClientConnPool
 // which never dials. We let the HTTP/1.1 client dial and use its TLS
 // connection instead.
 type noDialClientConnPool struct{ *clientConnPool }

vendor/golang.org/x/net/http2/databuffer.go (vendored, new file)

@@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
"sync"
)
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
chunks [][]byte
r int // next byte to read is chunks[0][r]
w int // next byte to write is chunks[len(chunks)-1][w]
size int // total buffered bytes
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
var errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
if b.size == 0 {
return 0, errReadEmpty
}
var ntotal int
for len(p) > 0 && b.size > 0 {
readFrom := b.bytesFromFirstChunk()
n := copy(p, readFrom)
p = p[n:]
ntotal += n
b.r += n
b.size -= n
// If the first chunk has been consumed, advance to the next chunk.
if b.r == len(b.chunks[0]) {
putDataBufferChunk(b.chunks[0])
end := len(b.chunks) - 1
copy(b.chunks[:end], b.chunks[1:])
b.chunks[end] = nil
b.chunks = b.chunks[:end]
b.r = 0
}
}
return ntotal, nil
}
func (b *dataBuffer) bytesFromFirstChunk() []byte {
if len(b.chunks) == 1 {
return b.chunks[0][b.r:b.w]
}
return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
return b.size
}
// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
ntotal := len(p)
for len(p) > 0 {
// If the last chunk is empty, allocate a new chunk. Try to allocate
// enough to fully copy p plus any additional bytes we expect to
// receive. However, this may allocate less than len(p).
want := int64(len(p))
if b.expected > want {
want = b.expected
}
chunk := b.lastChunkOrAlloc(want)
n := copy(chunk[b.w:], p)
p = p[n:]
b.w += n
b.size += n
b.expected -= int64(n)
}
return ntotal, nil
}
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
if len(b.chunks) != 0 {
last := b.chunks[len(b.chunks)-1]
if b.w < len(last) {
return last
}
}
chunk := getDataBufferChunk(want)
b.chunks = append(b.chunks, chunk)
b.w = 0
return chunk
}
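
The size classes above are easiest to see with concrete numbers. A minimal standalone sketch (an illustration, not part of this commit) of the same class-picking loop:

package main

import "fmt"

// Mirrors dataChunkSizeClasses and the selection loop in getDataBufferChunk.
var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

func pickClass(size int64) int {
	i := 0
	for ; i < len(sizeClasses)-1; i++ {
		if size <= int64(sizeClasses[i]) {
			break
		}
	}
	return sizeClasses[i] // oversized writes fall back to the largest class
}

func main() {
	for _, n := range []int64{100, 1024, 3000, 64 << 10} {
		fmt.Printf("write of %5d bytes -> %5d-byte chunk\n", n, pickClass(n))
	}
	// 100 -> 1024, 1024 -> 1024, 3000 -> 4096, 65536 -> 16384
}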

vendor/golang.org/x/net/http2/errors.go

@ -64,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error:
type StreamError struct {
	StreamID uint32
	Code     ErrCode
	Cause    error // optional additional detail
}

func streamError(id uint32, code ErrCode) StreamError {
	return StreamError{StreamID: id, Code: code}
}

func (e StreamError) Error() string {
	if e.Cause != nil {
		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
	}
	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}
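
A minimal sketch of how the new Cause field changes the message (illustration only; the trimmed-down Code type stands in for the real ErrCode):

package main

import (
	"errors"
	"fmt"
)

type StreamError struct {
	StreamID uint32
	Code     uint32
	Cause    error // optional additional detail
}

func (e StreamError) Error() string {
	if e.Cause != nil {
		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
	}
	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}

func main() {
	fmt.Println(StreamError{StreamID: 3, Code: 1})                                  // old-style message
	fmt.Println(StreamError{StreamID: 3, Code: 1, Cause: errors.New("bad header")}) // with detail appended
}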

vendor/golang.org/x/net/http2/fixed_buffer.go (deleted, replaced by databuffer.go)

@ -1,60 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
)
// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
buf []byte
r, w int
}
var (
errReadEmpty = errors.New("read from empty fixedBuffer")
errWriteFull = errors.New("write on full fixedBuffer")
)
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
if b.r == b.w {
return 0, errReadEmpty
}
n = copy(p, b.buf[b.r:b.w])
b.r += n
if b.r == b.w {
b.r = 0
b.w = 0
}
return n, nil
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
return b.w - b.r
}
// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
// Slide existing data to beginning.
if b.r > 0 && len(p) > len(b.buf)-b.w {
copy(b.buf, b.buf[b.r:b.w])
b.w -= b.r
b.r = 0
}
// Write new data.
n = copy(b.buf[b.w:], p)
b.w += n
if n < len(p) {
err = errWriteFull
}
return n, err
}

vendor/golang.org/x/net/http2/frame.go

@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
-type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)

var frameParsers = map[FrameType]frameParser{
	FrameData: parseDataFrame,
@ -312,15 +312,19 @@ type Framer struct {
	MaxHeaderListSize uint32

	// TODO: track which type of frame & with which flags was sent
	// last. Then return an error (unless AllowIllegalWrites) if
	// we're in the middle of a header block and a
	// non-Continuation or Continuation on a different stream is
	// attempted to be written.

-	logReads bool
	logReads, logWrites bool

	debugFramer       *Framer // only use for logging written writes
	debugFramerBuf    *bytes.Buffer
	debugReadLoggerf  func(string, ...interface{})
	debugWriteLoggerf func(string, ...interface{})

	frameCache *frameCache // nil if frames aren't reused (default)
}

func (fr *Framer) maxHeaderListSize() uint32 {
@ -355,7 +359,7 @@ func (f *Framer) endWrite() error {
		byte(length>>16),
		byte(length>>8),
		byte(length))
-	if logFrameWrites {
	if f.logWrites {
		f.logWrite()
	}

@ -378,10 +382,10 @@ func (f *Framer) logWrite() {
	f.debugFramerBuf.Write(f.wbuf)
	fr, err := f.debugFramer.ReadFrame()
	if err != nil {
-		log.Printf("http2: Framer %p: failed to decode just-written frame", f)
		f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
		return
	}
-	log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
	f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
}

func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
@ -396,12 +400,36 @@ const (
	maxFrameSize = 1<<24 - 1
)
// SetReuseFrames allows the Framer to reuse Frames.
// If called on a Framer, Frames returned by calls to ReadFrame are only
// valid until the next call to ReadFrame.
func (fr *Framer) SetReuseFrames() {
if fr.frameCache != nil {
return
}
fr.frameCache = &frameCache{}
}
type frameCache struct {
dataFrame DataFrame
}
func (fc *frameCache) getDataFrame() *DataFrame {
if fc == nil {
return &DataFrame{}
}
return &fc.dataFrame
}
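
A short sketch of the new API, assuming the vendored package is imported as golang.org/x/net/http2: after SetReuseFrames, a DataFrame returned by ReadFrame is only valid until the next ReadFrame, so its payload must be consumed (or copied) immediately.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fw := http2.NewFramer(&buf, nil)
	if err := fw.WriteData(1, true, []byte("hello")); err != nil {
		log.Fatal(err)
	}

	fr := http2.NewFramer(nil, &buf)
	fr.SetReuseFrames() // frames are now only valid until the next ReadFrame
	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	df := f.(*http2.DataFrame)
	fmt.Printf("stream %d: %q\n", df.StreamID, df.Data()) // stream 1: "hello"
}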
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
	fr := &Framer{
		w:                 w,
		r:                 r,
		logReads:          logFrameReads,
		logWrites:         logFrameWrites,
		debugReadLoggerf:  log.Printf,
		debugWriteLoggerf: log.Printf,
	}
	fr.getReadBuf = func(size uint32) []byte {
		if cap(fr.readBuf) >= int(size) {
@ -472,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
	if _, err := io.ReadFull(fr.r, payload); err != nil {
		return nil, err
	}
-	f, err := typeFrameParser(fh.Type)(fh, payload)
	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
	if err != nil {
		if ce, ok := err.(connError); ok {
			return nil, fr.connError(ce.Code, ce.Reason)

@ -483,7 +511,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
		return nil, err
	}
	if fr.logReads {
-		log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
		fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
	}
	if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
		return fr.readMetaFrame(f.(*HeadersFrame))

@ -560,7 +588,7 @@ func (f *DataFrame) Data() []byte {
	return f.data
}

-func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
	if fh.StreamID == 0 {
		// DATA frames MUST be associated with a stream. If a
		// DATA frame is received whose stream identifier

@ -569,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
		// PROTOCOL_ERROR.
		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
	}
-	f := &DataFrame{
-		FrameHeader: fh,
-	}
	f := fc.getDataFrame()
	f.FrameHeader = fh

	var padSize byte
	if fh.Flags.Has(FlagDataPadded) {
		var err error
@ -594,6 +622,8 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
var (
	errStreamID    = errors.New("invalid stream ID")
	errDepStreamID = errors.New("invalid dependent stream ID")
	errPadLength   = errors.New("pad length too large")
	errPadBytes    = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)

func validStreamIDOrZero(streamID uint32) bool {
@ -607,18 +637,51 @@ func validStreamID(streamID uint32) bool {
// WriteData writes a DATA frame.
//
// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
-	// TODO: ignoring padding for now. will add when somebody cares.
	return f.WriteDataPadded(streamID, endStream, data, nil)
}

// WriteData writes a DATA frame with optional padding.
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
	if !validStreamID(streamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	if len(pad) > 0 {
		if len(pad) > 255 {
			return errPadLength
		}
		if !f.AllowIllegalWrites {
			for _, b := range pad {
				if b != 0 {
					// "Padding octets MUST be set to zero when sending."
					return errPadBytes
				}
			}
		}
	}
	var flags Flags
	if endStream {
		flags |= FlagDataEndStream
	}
	if pad != nil {
		flags |= FlagDataPadded
	}
	f.startWrite(FrameData, flags, streamID)
	if pad != nil {
		f.wbuf = append(f.wbuf, byte(len(pad)))
	}
	f.wbuf = append(f.wbuf, data...)
	f.wbuf = append(f.wbuf, pad...)
	return f.endWrite()
}
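
A sketch of WriteDataPadded under the same import assumption; the length check shows the wire layout (9-byte header, 1 pad-length byte, payload, zero padding):

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil)
	// Padding must be all zero bytes and at most 255 bytes long.
	if err := fr.WriteDataPadded(3, false, []byte("body"), make([]byte, 4)); err != nil {
		log.Fatal(err)
	}
	// 9-byte frame header + 1 pad-length byte + 4 payload bytes + 4 pad bytes.
	fmt.Println(buf.Len()) // 18
}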
@ -632,10 +695,10 @@ type SettingsFrame struct {
	p []byte
}

-func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
		// When this (ACK 0x1) bit is set, the payload of the
		// SETTINGS frame MUST be empty. Receipt of a
		// SETTINGS frame with the ACK flag set and a length
		// field value other than 0 MUST be treated as a
		// connection error (Section 5.4.1) of type

@ -644,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
	}
	if fh.StreamID != 0 {
		// SETTINGS frames always apply to a connection,
		// never a single stream. The stream identifier for a
		// SETTINGS frame MUST be zero (0x0). If an endpoint
		// receives a SETTINGS frame whose stream identifier
		// field is anything other than 0x0, the endpoint MUST

@ -714,7 +777,7 @@ func (f *Framer) WriteSettings(settings ...Setting) error {
	return f.endWrite()
}

-// WriteSettings writes an empty SETTINGS frame with the ACK bit set.
// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
@ -734,7 +797,7 @@ type PingFrame struct {

func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }

-func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
	if len(payload) != 8 {
		return nil, ConnectionError(ErrCodeFrameSize)
	}

@ -774,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte {
	return f.debugData
}

-func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if fh.StreamID != 0 {
		return nil, ConnectionError(ErrCodeProtocol)
	}

@ -814,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte {
	return f.p
}

-func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	return &UnknownFrame{fh, p}, nil
}

@ -825,7 +888,7 @@ type WindowUpdateFrame struct {
	Increment uint32 // never read with high bit set
}

-func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if len(p) != 4 {
		return nil, ConnectionError(ErrCodeFrameSize)
	}

@ -840,7 +903,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
		if fh.StreamID == 0 {
			return nil, ConnectionError(ErrCodeProtocol)
		}
-		return nil, StreamError{fh.StreamID, ErrCodeProtocol}
		return nil, streamError(fh.StreamID, ErrCodeProtocol)
	}
	return &WindowUpdateFrame{
		FrameHeader: fh,
@ -890,12 +953,12 @@ func (f *HeadersFrame) HasPriority() bool {
	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}

-func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
	hf := &HeadersFrame{
		FrameHeader: fh,
	}
	if fh.StreamID == 0 {
		// HEADERS frames MUST be associated with a stream. If a HEADERS frame
		// is received whose stream identifier field is 0x0, the recipient MUST
		// respond with a connection error (Section 5.4.1) of type
		// PROTOCOL_ERROR.

@ -921,7 +984,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
		}
	}
	if len(p)-int(padLength) <= 0 {
-		return nil, StreamError{fh.StreamID, ErrCodeProtocol}
		return nil, streamError(fh.StreamID, ErrCodeProtocol)
	}
	hf.headerFragBuf = p[:len(p)-int(padLength)]
	return hf, nil

@ -1017,7 +1080,7 @@ type PriorityParam struct {
	Exclusive bool

	// Weight is the stream's zero-indexed weight. It should be
	// set together with StreamDep, or neither should be set. Per
	// the spec, "Add one to the value to obtain a weight between
	// 1 and 256."
	Weight uint8

@ -1027,7 +1090,7 @@ func (p PriorityParam) IsZero() bool {
	return p == PriorityParam{}
}

-func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
	if fh.StreamID == 0 {
		return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
	}

@ -1074,7 +1137,7 @@ type RSTStreamFrame struct {
	ErrCode ErrCode
}

-func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if len(p) != 4 {
		return nil, ConnectionError(ErrCodeFrameSize)
	}

@ -1104,7 +1167,7 @@ type ContinuationFrame struct {
	headerFragBuf []byte
}

-func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
	if fh.StreamID == 0 {
		return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
	}

@ -1154,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}

-func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
	pp := &PushPromiseFrame{
		FrameHeader: fh,
	}
@ -1396,6 +1459,9 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
	hdec.SetEmitEnabled(true)
	hdec.SetMaxStringLength(fr.maxHeaderStringLen())
	hdec.SetEmitFunc(func(hf hpack.HeaderField) {
		if VerboseLogs && fr.logReads {
			fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
		}
		if !httplex.ValidHeaderFieldValue(hf.Value) {
			invalid = headerFieldValueError(hf.Value)
		}

@ -1454,11 +1520,17 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
	}
	if invalid != nil {
		fr.errDetail = invalid
-		return nil, StreamError{mh.StreamID, ErrCodeProtocol}
		if VerboseLogs {
			log.Printf("http2: invalid header: %v", invalid)
		}
		return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
	}
	if err := mh.checkPseudos(); err != nil {
		fr.errDetail = err
-		return nil, StreamError{mh.StreamID, ErrCodeProtocol}
		if VerboseLogs {
			log.Printf("http2: invalid pseudo headers: %v", err)
		}
		return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
	}
	return mh, nil
}

vendor/golang.org/x/net/http2/go17.go

@ -39,6 +39,13 @@ type clientTrace httptrace.ClientTrace

func reqContext(r *http.Request) context.Context { return r.Context() }

func (t *Transport) idleConnTimeout() time.Duration {
	if t.t1 != nil {
		return t.t1.IdleConnTimeout
	}
	return 0
}

func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }

func traceGotConn(req *http.Request, cc *ClientConn) {

@ -92,3 +99,8 @@ func requestTrace(req *http.Request) *clientTrace {
	trace := httptrace.ContextClientTrace(req.Context())
	return (*clientTrace)(trace)
}

// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
	return cc.ping(ctx)
}
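
The exported Ping is new in this update. A hedged sketch of calling it on a hand-built connection (example.com stands in for any h2-capable server):

package main

import (
	"context"
	"crypto/tls"
	"log"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{NextProtos: []string{"h2"}})
	if err != nil {
		log.Fatal(err)
	}
	t := &http2.Transport{}
	cc, err := t.NewClientConn(conn)
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Ping(ctx); err != nil {
		log.Fatal(err)
	}
	log.Println("PING acked")
}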

vendor/golang.org/x/net/http2/go17_not18.go generated vendored Normal file (36 lines)

@ -0,0 +1,36 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,!go1.8
package http2
import "crypto/tls"
// temporary copy of Go 1.7's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
}
}

vendor/golang.org/x/net/http2/go18.go generated vendored Normal file (54 lines)

@ -0,0 +1,54 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package http2
import (
"crypto/tls"
"io"
"net/http"
)
func cloneTLSConfig(c *tls.Config) *tls.Config {
c2 := c.Clone()
c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
return c2
}
var _ http.Pusher = (*responseWriter)(nil)
// Push implements http.Pusher.
func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
internalOpts := pushOptions{}
if opts != nil {
internalOpts.Method = opts.Method
internalOpts.Header = opts.Header
}
return w.push(target, internalOpts)
}
func configureServer18(h1 *http.Server, h2 *Server) error {
if h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
} else {
h2.IdleTimeout = h1.ReadTimeout
}
}
return nil
}
func shouldLogPanic(panicValue interface{}) bool {
return panicValue != nil && panicValue != http.ErrAbortHandler
}
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
return req.GetBody
}
func reqBodyIsNoBody(body io.ReadCloser) bool {
return body == http.NoBody
}
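
The Push method above wires http2's server push into net/http's Pusher interface on Go 1.8. A minimal handler sketch (cert.pem/key.pem are assumed to exist):

package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		// Best effort: push the stylesheet before the page that needs it.
		if err := pusher.Push("/style.css", nil); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	w.Write([]byte(`<link rel="stylesheet" href="/style.css"><p>hi</p>`))
}

func main() {
	http.HandleFunc("/", handler)
	// net/http negotiates HTTP/2 automatically on a TLS listener.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}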

vendor/golang.org/x/net/http2/hpack/encode.go

@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder {
		tableSizeUpdate: false,
		w:               w,
	}
	e.dynTab.table.init()
	e.dynTab.setMaxSize(initialHeaderTableSize)
	return e
}

// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
	e.buf = e.buf[:0]

@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error {
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
-	for idx, hf := range staticTable {
-		if !constantTimeStringCompare(hf.Name, f.Name) {
-			continue
-		}
-		if i == 0 {
-			i = uint64(idx + 1)
-		}
-		if f.Sensitive {
-			continue
-		}
-		if !constantTimeStringCompare(hf.Value, f.Value) {
-			continue
-		}
-		i = uint64(idx + 1)
-		nameValueMatch = true
-		return
	i, nameValueMatch = staticTable.search(f)
	if nameValueMatch {
		return i, true
	}

-	j, nameValueMatch := e.dynTab.search(f)
	j, nameValueMatch := e.dynTab.table.search(f)
	if nameValueMatch || (i == 0 && j != 0) {
-		i = j + uint64(len(staticTable))
		return j + uint64(staticTable.len()), nameValueMatch
	}
-	return
	return i, false
}

// SetMaxDynamicTableSize changes the dynamic header table size to v.

vendor/golang.org/x/net/http2/hpack/hpack.go

@ -57,11 +57,11 @@ func (hf HeaderField) String() string {
	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}

-// Size returns the size of an entry per RFC 7540 section 5.2.
// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
	// "The size of the dynamic table is the sum of the size of
	// its entries. The size of an entry is the sum of its name's
	// length in octets (as defined in Section 5.2), its value's
	// length in octets (see Section 5.2), plus 32. The size of
	// an entry is calculated using the length of the name and

@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
		emit:        emitFunc,
		emitEnabled: true,
	}
	d.dynTab.table.init()
	d.dynTab.allowedMaxSize = maxDynamicTableSize
	d.dynTab.setMaxSize(maxDynamicTableSize)
	return d

@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
}

type dynamicTable struct {
-	// ents is the FIFO described at
	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
-	// The newest (low index) is append at the end, and items are
-	// evicted from the front.
-	ents []HeaderField
-	size uint32
	table          headerFieldTable
	size           uint32 // in bytes
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}

@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.evict()
}

-// TODO: change dynamicTable to be a struct with a slice and a size int field,
-// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
-//
-//
-// Then make add increment the size. maybe the max size should move from Decoder to
-// dynamicTable and add should return an ok bool if there was enough space.
-//
-// Later we'll need a remove operation on dynamicTable.

func (dt *dynamicTable) add(f HeaderField) {
-	dt.ents = append(dt.ents, f)
	dt.table.addEntry(f)
	dt.size += f.Size()
	dt.evict()
}

-// If we're too big, evict old stuff (front of the slice)
// If we're too big, evict old stuff.
func (dt *dynamicTable) evict() {
-	base := dt.ents // keep base pointer of slice
-	for dt.size > dt.maxSize {
-		dt.size -= dt.ents[0].Size()
-		dt.ents = dt.ents[1:]
	var n int
	for dt.size > dt.maxSize && n < dt.table.len() {
		dt.size -= dt.table.ents[n].Size()
		n++
	}
	dt.table.evictOldest(n)
-	// Shift slice contents down if we evicted things.
-	if len(dt.ents) != len(base) {
-		copy(base, dt.ents)
-		dt.ents = base[:len(dt.ents)]
-	}
}

-// constantTimeStringCompare compares string a and b in a constant
-// time manner.
-func constantTimeStringCompare(a, b string) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	c := byte(0)
-	for i := 0; i < len(a); i++ {
-		c |= a[i] ^ b[i]
-	}
-	return c == 0
-}

-// Search searches f in the table. The return value i is 0 if there is
-// no name match. If there is name match or name/value match, i is the
-// index of that entry (1-based). If both name and value match,
-// nameValueMatch becomes true.
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
-	l := len(dt.ents)
-	for j := l - 1; j >= 0; j-- {
-		ent := dt.ents[j]
-		if !constantTimeStringCompare(ent.Name, f.Name) {
-			continue
-		}
-		if i == 0 {
-			i = uint64(l - j)
-		}
-		if f.Sensitive {
-			continue
-		}
-		if !constantTimeStringCompare(ent.Value, f.Value) {
-			continue
-		}
-		i = uint64(l - j)
-		nameValueMatch = true
-		return
-	}
-	return
-}

func (d *Decoder) maxTableIndex() int {
-	return len(d.dynTab.ents) + len(staticTable)
	// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
	// the dynamic table to 2^32 bytes, where each entry will occupy more than
	// one byte. Further, the staticTable has a fixed, small length.
	return d.dynTab.table.len() + staticTable.len()
}

func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
-	if i < 1 {
	// See Section 2.3.3.
	if i == 0 {
		return
	}
	if i <= uint64(staticTable.len()) {
		return staticTable.ents[i-1], true
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
-	if i <= uint64(len(staticTable)) {
-		return staticTable[i-1], true
-	}
-	dents := d.dynTab.ents
-	return dents[len(dents)-(int(i)-len(staticTable))], true
	// In the dynamic table, newer entries have lower indices.
	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
	// the reversed dynamic table.
	dt := d.dynTab.table
	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}

// Decode decodes an entire block.

@ -307,7 +255,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
		err = d.parseHeaderFieldRepr()
		if err == errNeedMore {
			// Extra paranoia, making sure saveBuf won't
			// get too large. All the varint and string
			// reading code earlier should already catch
			// overlong things and return ErrStringLength,
			// but keep this as a last resort.

vendor/golang.org/x/net/http2/hpack/tables.go

@ -4,73 +4,199 @@
package hpack
import (
"fmt"
)
// headerFieldTable implements a list of HeaderFields.
// This is used to implement the static and dynamic tables.
type headerFieldTable struct {
// For static tables, entries are never evicted.
//
// For dynamic tables, entries are evicted from ents[0] and added to the end.
// Each entry has a unique id that starts at one and increments for each
// entry that is added. This unique id is stable across evictions, meaning
// it can be used as a pointer to a specific entry. As in hpack, unique ids
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
//
// Zero is not a valid unique id.
//
// evictCount should not overflow in any remotely practical situation. In
// practice, we will have one dynamic table per HTTP/2 connection. If we
// assume a very powerful server that handles 1M QPS per connection and each
// request adds (then evicts) 100 entries from the table, it would still take
// 2M years for evictCount to overflow.
ents []HeaderField
evictCount uint64
// byName maps a HeaderField name to the unique id of the newest entry with
// the same name. See above for a definition of "unique id".
byName map[string]uint64
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
// entry with the same name and value. See above for a definition of "unique id".
byNameValue map[pairNameValue]uint64
}
type pairNameValue struct {
name, value string
}
func (t *headerFieldTable) init() {
t.byName = make(map[string]uint64)
t.byNameValue = make(map[pairNameValue]uint64)
}
// len reports the number of entries in the table.
func (t *headerFieldTable) len() int {
return len(t.ents)
}
// addEntry adds a new entry.
func (t *headerFieldTable) addEntry(f HeaderField) {
id := uint64(t.len()) + t.evictCount + 1
t.byName[f.Name] = id
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
t.ents = append(t.ents, f)
}
// evictOldest evicts the n oldest entries in the table.
func (t *headerFieldTable) evictOldest(n int) {
if n > t.len() {
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
}
for k := 0; k < n; k++ {
f := t.ents[k]
id := t.evictCount + uint64(k) + 1
if t.byName[f.Name] == id {
delete(t.byName, f.Name)
}
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
delete(t.byNameValue, p)
}
}
copy(t.ents, t.ents[n:])
for k := t.len() - n; k < t.len(); k++ {
t.ents[k] = HeaderField{} // so strings can be garbage collected
}
t.ents = t.ents[:t.len()-n]
if t.evictCount+uint64(n) < t.evictCount {
panic("evictCount overflow")
}
t.evictCount += uint64(n)
}
// search finds f in the table. If there is no match, i is 0.
// If both name and value match, i is the matched index and nameValueMatch
// becomes true. If only name matches, i points to that index and
// nameValueMatch becomes false.
//
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
// table, the return value i actually refers to the entry t.ents[t.len()-i].
//
// All tables are assumed to be a dynamic tables except for the global
// staticTable pointer.
//
// See Section 2.3.3.
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
if !f.Sensitive {
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
return t.idToIndex(id), true
}
}
if id := t.byName[f.Name]; id != 0 {
return t.idToIndex(id), false
}
return 0, false
}
// idToIndex converts a unique id to an HPACK index.
// See Section 2.3.3.
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
if id <= t.evictCount {
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
}
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
if t != staticTable {
return uint64(t.len()) - k // dynamic table
}
return k + 1
}
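
The id-to-index math is subtle because dynamic-table indices are reversed. A standalone sketch of the same arithmetic with concrete numbers:

package main

import "fmt"

// With evictCount prior evictions and n live entries, ids run
// evictCount+1 .. evictCount+n and the newest entry gets HPACK index 1.
func dynamicIndex(id, evictCount uint64, n int) uint64 {
	k := id - evictCount - 1 // position within ents (0 = oldest)
	return uint64(n) - k
}

func main() {
	// Three live entries after five evictions: ids 6 (oldest) .. 8 (newest).
	for id := uint64(6); id <= 8; id++ {
		fmt.Printf("id %d -> HPACK index %d\n", id, dynamicIndex(id, 5, 3))
	}
	// id 8 -> 1, id 7 -> 2, id 6 -> 3, matching idToIndex above.
}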
func pair(name, value string) HeaderField {
	return HeaderField{Name: name, Value: value}
}

// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = [...]HeaderField{
-	pair(":authority", ""), // index 1 (1-based)
-	pair(":method", "GET"), pair(":method", "POST"), pair(":path", "/"), pair(":path", "/index.html"),
-	pair(":scheme", "http"), pair(":scheme", "https"), pair(":status", "200"), pair(":status", "204"),
-	pair(":status", "206"), pair(":status", "304"), pair(":status", "400"), pair(":status", "404"), pair(":status", "500"),
-	pair("accept-charset", ""), pair("accept-encoding", "gzip, deflate"), pair("accept-language", ""),
-	pair("accept-ranges", ""), pair("accept", ""), pair("access-control-allow-origin", ""), pair("age", ""),
-	pair("allow", ""), pair("authorization", ""), pair("cache-control", ""), pair("content-disposition", ""),
-	pair("content-encoding", ""), pair("content-language", ""), pair("content-length", ""), pair("content-location", ""),
-	pair("content-range", ""), pair("content-type", ""), pair("cookie", ""), pair("date", ""), pair("etag", ""),
-	pair("expect", ""), pair("expires", ""), pair("from", ""), pair("host", ""), pair("if-match", ""),
-	pair("if-modified-since", ""), pair("if-none-match", ""), pair("if-range", ""), pair("if-unmodified-since", ""),
-	pair("last-modified", ""), pair("link", ""), pair("location", ""), pair("max-forwards", ""),
-	pair("proxy-authenticate", ""), pair("proxy-authorization", ""), pair("range", ""), pair("referer", ""),
-	pair("refresh", ""), pair("retry-after", ""), pair("server", ""), pair("set-cookie", ""),
-	pair("strict-transport-security", ""), pair("transfer-encoding", ""), pair("user-agent", ""),
-	pair("vary", ""), pair("via", ""), pair("www-authenticate", ""),
-}
var staticTable = newStaticTable()

func newStaticTable() *headerFieldTable {
	t := &headerFieldTable{}
	t.init()
	t.addEntry(pair(":authority", ""))
	t.addEntry(pair(":method", "GET"))
	t.addEntry(pair(":method", "POST"))
	t.addEntry(pair(":path", "/"))
	t.addEntry(pair(":path", "/index.html"))
	t.addEntry(pair(":scheme", "http"))
	t.addEntry(pair(":scheme", "https"))
	t.addEntry(pair(":status", "200"))
	t.addEntry(pair(":status", "204"))
	t.addEntry(pair(":status", "206"))
	t.addEntry(pair(":status", "304"))
	t.addEntry(pair(":status", "400"))
	t.addEntry(pair(":status", "404"))
	t.addEntry(pair(":status", "500"))
	t.addEntry(pair("accept-charset", ""))
	t.addEntry(pair("accept-encoding", "gzip, deflate"))
	t.addEntry(pair("accept-language", ""))
	t.addEntry(pair("accept-ranges", ""))
	t.addEntry(pair("accept", ""))
	t.addEntry(pair("access-control-allow-origin", ""))
	t.addEntry(pair("age", ""))
	t.addEntry(pair("allow", ""))
	t.addEntry(pair("authorization", ""))
	t.addEntry(pair("cache-control", ""))
	t.addEntry(pair("content-disposition", ""))
	t.addEntry(pair("content-encoding", ""))
	t.addEntry(pair("content-language", ""))
	t.addEntry(pair("content-length", ""))
	t.addEntry(pair("content-location", ""))
	t.addEntry(pair("content-range", ""))
	t.addEntry(pair("content-type", ""))
	t.addEntry(pair("cookie", ""))
	t.addEntry(pair("date", ""))
	t.addEntry(pair("etag", ""))
	t.addEntry(pair("expect", ""))
	t.addEntry(pair("expires", ""))
	t.addEntry(pair("from", ""))
	t.addEntry(pair("host", ""))
	t.addEntry(pair("if-match", ""))
	t.addEntry(pair("if-modified-since", ""))
	t.addEntry(pair("if-none-match", ""))
	t.addEntry(pair("if-range", ""))
	t.addEntry(pair("if-unmodified-since", ""))
	t.addEntry(pair("last-modified", ""))
	t.addEntry(pair("link", ""))
	t.addEntry(pair("location", ""))
	t.addEntry(pair("max-forwards", ""))
	t.addEntry(pair("proxy-authenticate", ""))
	t.addEntry(pair("proxy-authorization", ""))
	t.addEntry(pair("range", ""))
	t.addEntry(pair("referer", ""))
	t.addEntry(pair("refresh", ""))
	t.addEntry(pair("retry-after", ""))
	t.addEntry(pair("server", ""))
	t.addEntry(pair("set-cookie", ""))
	t.addEntry(pair("strict-transport-security", ""))
	t.addEntry(pair("transfer-encoding", ""))
	t.addEntry(pair("user-agent", ""))
	t.addEntry(pair("vary", ""))
	t.addEntry(pair("via", ""))
	t.addEntry(pair("www-authenticate", ""))
	return t
}
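
A quick round-trip through the public hpack API shows the static table in action: a full name/value match encodes to a single indexed byte (0x82 for :method: GET):

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	if err := enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 82 — static-table index 2, one byte

	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value) // :method: GET
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		log.Fatal(err)
	}
	dec.Close()
}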
var huffmanCodes = [256]uint32{

vendor/golang.org/x/net/http2/http2.go

@ -13,7 +13,8 @@
// See https://http2.github.io/ for more information on HTTP/2.
//
// See https://http2.golang.org/ for a test server running this code.
-package http2
//
package http2 // import "golang.org/x/net/http2"

import (
	"bufio"

@ -35,6 +36,7 @@ var (
	VerboseLogs    bool
	logFrameWrites bool
	logFrameReads  bool
	inTests        bool
)

func init() {

@ -76,13 +78,23 @@ var (

type streamState int
// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
	stateIdle streamState = iota
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
-	stateResvLocal
-	stateResvRemote
	stateClosed
)

@ -91,8 +103,6 @@ var stateName = [...]string{
	stateOpen:             "Open",
	stateHalfClosedLocal:  "HalfClosedLocal",
	stateHalfClosedRemote: "HalfClosedRemote",
-	stateResvLocal:        "ResvLocal",
-	stateResvRemote:       "ResvRemote",
	stateClosed:           "Closed",
}

@ -252,14 +262,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter {
	return &bufferedWriter{w: w}
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
// buffers created using bufWriterPool.
//
// TODO: pick a less arbitrary value? this is a bit under
// (3 x typical 1500 byte MTU) at least. Other than that,
// not much thought went into it.
const bufWriterPoolBufferSize = 4 << 10
var bufWriterPool = sync.Pool{
	New: func() interface{} {
-		// TODO: pick something better? this is a bit under
-		// (3 x typical 1500 byte MTU) at least.
-		return bufio.NewWriterSize(nil, 4<<10)
		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
	},
}
func (w *bufferedWriter) Available() int {
if w.bw == nil {
return bufWriterPoolBufferSize
}
return w.bw.Available()
}
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
	if w.bw == nil {
		bw := bufWriterPool.Get().(*bufio.Writer)

@ -342,10 +365,23 @@ func (s *sorter) Keys(h http.Header) []string {
}

func (s *sorter) SortStrings(ss []string) {
-	// Our sorter works on s.v, which sorter owners, so
	// Our sorter works on s.v, which sorter owns, so
	// stash it away while we sort the user's buffer.
	save := s.v
	s.v = ss
	sort.Sort(s)
	s.v = save
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// *) a non-empty string starting with '/', but not with with "//",
// *) the string '*', for OPTIONS requests.
//
// For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
}
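
validPseudoPath is small enough to check by example; a standalone copy of it with a few probes:

package main

import "fmt"

// Standalone copy of validPseudoPath from the hunk above.
func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
}

func main() {
	for _, v := range []string{"/", "/index.html", "*", "", "//evil", "relative"} {
		fmt.Printf("%q -> %v\n", v, validPseudoPath(v))
	}
	// "/", "/index.html" and "*" are valid; "", "//evil" and "relative" are not.
}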

vendor/golang.org/x/net/http2/not_go17.go

@ -7,11 +7,16 @@
package http2

import (
	"crypto/tls"
	"net"
	"net/http"
	"time"
)

-type contextContext interface{}
type contextContext interface {
	Done() <-chan struct{}
	Err() error
}

type fakeContext struct{}

@ -49,3 +54,34 @@ func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
	return req
}
// temporary copy of Go 1.6's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
}
}
func (cc *ClientConn) Ping(ctx contextContext) error {
return cc.ping(ctx)
}
func (t *Transport) idleConnTimeout() time.Duration { return 0 }

vendor/golang.org/x/net/http2/not_go18.go generated vendored Normal file (27 lines)

@ -0,0 +1,27 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.8
package http2
import (
"io"
"net/http"
)
func configureServer18(h1 *http.Server, h2 *Server) error {
// No IdleTimeout to sync prior to Go 1.8.
return nil
}
func shouldLogPanic(panicValue interface{}) bool {
return panicValue != nil
}
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
return nil
}
func reqBodyIsNoBody(io.ReadCloser) bool { return false }

vendor/golang.org/x/net/http2/pipe.go

@ -10,7 +10,7 @@ import (
	"sync"
)

// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {

vendor/golang.org/x/net/http2/server.go generated vendored (1266 lines changed)
File diff suppressed because it is too large

vendor/golang.org/x/net/http2/transport.go

@ -10,12 +10,14 @@ import (
	"bufio"
	"bytes"
	"compress/gzip"
	"crypto/rand"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math"
	"net"
	"net/http"
	"sort"

@ -25,6 +27,7 @@ import (
	"time"

	"golang.org/x/net/http2/hpack"
	"golang.org/x/net/idna"
	"golang.org/x/net/lex/httplex"
)
@ -148,27 +151,32 @@ type ClientConn struct {
	readerDone chan struct{} // closed on error
	readerErr  error         // set before readerDone is closed

	idleTimeout time.Duration // or 0 for never
	idleTimer   *time.Timer

	mu              sync.Mutex // guards following
	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
	inflow          flow       // peer's conn-level flow control
	closed          bool
	wantSettingsAck bool                      // we sent a SETTINGS frame and haven't heard back
	goAway          *GoAwayFrame              // if non-nil, the GoAwayFrame we received
	goAwayDebug     string                    // goAway frame's debug data, retained as a string
	streams         map[uint32]*clientStream  // client-initiated
	nextStreamID    uint32
	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
	bw              *bufio.Writer
	br              *bufio.Reader
	fr              *Framer
	lastActive      time.Time

-	// Settings from peer:
	// Settings from peer: (also guarded by mu)
	maxFrameSize         uint32
	maxConcurrentStreams uint32
	initialWindowSize    uint32

	hbuf    bytes.Buffer // HPACK encoder writes into this
	henc    *hpack.Encoder
	freeBuf [][]byte

	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
	werr error      // first write error that has occurred
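
The new idleTimeout/idleTimer fields are fed from the wrapped http.Transport (see idleConnTimeout in go17.go above). A sketch of opting in via ConfigureTransport, assuming network access (example.com is a placeholder):

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	// IdleConnTimeout set on the wrapped http.Transport is picked up by
	// http2's idleConnTimeout and arms cc.idleTimer on each new conn.
	t1 := &http.Transport{IdleConnTimeout: 30 * time.Second}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t1}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	log.Println(resp.Proto) // "HTTP/2.0" when the server negotiates h2
}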
@ -183,6 +191,7 @@ type clientStream struct {
	ID           uint32
	resc         chan resAndError
	bufPipe      pipe // buffered pipe with the flow-controlled response payload
	startedWrite bool // started request body write; guarded by cc.mu

	requestedGzip bool
	on100         func() // optional code to run if get a 100 continue response

@ -191,6 +200,7 @@ type clientStream struct {
	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
	readErr     error // sticky read error; owned by transportResponseBody.Read
	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu

	peerReset chan struct{} // closed on peer reset
	resetErr  error         // populated before peerReset is closed
@ -218,15 +228,26 @@ func (cs *clientStream) awaitRequestCancel(req *http.Request) {
	}
	select {
	case <-req.Cancel:
		cs.cancelStream()
		cs.bufPipe.CloseWithError(errRequestCanceled)
-		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
	case <-ctx.Done():
		cs.cancelStream()
		cs.bufPipe.CloseWithError(ctx.Err())
-		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
	case <-cs.done:
	}
}

func (cs *clientStream) cancelStream() {
	cs.cc.mu.Lock()
	didReset := cs.didReset
	cs.didReset = true
	cs.cc.mu.Unlock()

	if !didReset {
		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
	}
}

// checkResetOrDone reports any error sent in a RST_STREAM frame by the
// server, or errStreamClosed if the stream is complete.
func (cs *clientStream) checkResetOrDone() error {
@ -283,14 +304,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
// and returns a host:port. The port 443 is added if needed.
func authorityAddr(scheme string, authority string) (addr string) {
-	if _, _, err := net.SplitHostPort(authority); err == nil {
-		return authority
-	}
-	port := "443"
-	if scheme == "http" {
-		port = "80"
-	}
-	return net.JoinHostPort(authority, port)
	host, port, err := net.SplitHostPort(authority)
	if err != nil { // authority didn't have a port
		port = "443"
		if scheme == "http" {
			port = "80"
		}
		host = authority
	}
	if a, err := idna.ToASCII(host); err == nil {
		host = a
	}
	// IPv6 address literal, without a port:
	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
		return host + ":" + port
	}
	return net.JoinHostPort(host, port)
}
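
The reworked authorityAddr now handles default ports, IDNA hosts, and bracketed IPv6 literals. A standalone copy with sample inputs (expected outputs in comments are illustrative):

package main

import (
	"fmt"
	"net"
	"strings"

	"golang.org/x/net/idna"
)

// Standalone copy of the updated authorityAddr, for illustration.
func authorityAddr(scheme string, authority string) (addr string) {
	host, port, err := net.SplitHostPort(authority)
	if err != nil { // authority didn't have a port
		port = "443"
		if scheme == "http" {
			port = "80"
		}
		host = authority
	}
	if a, err := idna.ToASCII(host); err == nil {
		host = a
	}
	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
		return host + ":" + port
	}
	return net.JoinHostPort(host, port)
}

func main() {
	fmt.Println(authorityAddr("https", "example.com"))      // example.com:443
	fmt.Println(authorityAddr("http", "example.com"))       // example.com:80
	fmt.Println(authorityAddr("https", "bücher.de"))        // xn--bcher-kva.de:443
	fmt.Println(authorityAddr("https", "[::1]"))            // [::1]:443
	fmt.Println(authorityAddr("https", "example.com:8443")) // example.com:8443
}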
// RoundTripOpt is like RoundTrip, but takes options.
@ -308,8 +337,10 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
		}
		traceGotConn(req, cc)
		res, err := cc.RoundTrip(req)
-		if shouldRetryRequest(req, err) {
-			continue
		if err != nil {
			if req, err = shouldRetryRequest(req, err); err == nil {
				continue
			}
		}
		if err != nil {
			t.vlogf("RoundTrip failure: %v", err)

@ -331,12 +362,41 @@ func (t *Transport) CloseIdleConnections() {
var (
	errClientConnClosed   = errors.New("http2: client conn is closed")
	errClientConnUnusable = errors.New("http2: client conn not usable")

	errClientConnGotGoAway                 = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
	errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written")
)

-func shouldRetryRequest(req *http.Request, err error) bool {
-	// TODO: retry GET requests (no bodies) more aggressively, if shutdown
-	// before response.
-	return err == errClientConnUnusable
// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
	switch err {
	default:
		return nil, err
	case errClientConnUnusable, errClientConnGotGoAway:
		return req, nil
	case errClientConnGotGoAwayAfterSomeReqBody:
		// If the Body is nil (or http.NoBody), it's safe to reuse
		// this request and its Body.
		if req.Body == nil || reqBodyIsNoBody(req.Body) {
			return req, nil
		}
		// Otherwise we depend on the Request having its GetBody
		// func defined.
		getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
		if getBody == nil {
			return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error")
		}
		body, err := getBody()
		if err != nil {
			return nil, err
		}
		newReq := *req
		newReq.Body = body
		return &newReq, nil
	}
}

func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
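
On the caller's side, the GOAWAY retry path above only works for requests whose body can be re-created. A sketch of supplying GetBody (Go 1.8+; NewRequest already sets it for bytes.Reader bodies, so the explicit assignment is illustrative):

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	payload := []byte(`{"hello":"world"}`)
	req, err := http.NewRequest("POST", "https://example.com/api", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	// GetBody lets the http2 Transport replay the body after a graceful
	// GOAWAY; without it, such requests fail with the error string above.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}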
@ -354,7 +414,7 @@ func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, er
func (t *Transport) newTLSConfig(host string) *tls.Config { func (t *Transport) newTLSConfig(host string) *tls.Config {
cfg := new(tls.Config) cfg := new(tls.Config)
if t.TLSClientConfig != nil { if t.TLSClientConfig != nil {
*cfg = *t.TLSClientConfig *cfg = *cloneTLSConfig(t.TLSClientConfig)
} }
if !strSliceContains(cfg.NextProtos, NextProtoTLS) { if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
@ -413,14 +473,6 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
}

func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
if VerboseLogs {
t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
}
if _, err := c.Write(clientPreface); err != nil {
t.vlogf("client preface write error: %v", err)
return nil, err
}
cc := &ClientConn{
t: t,
tconn: c,
@ -431,7 +483,17 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
}
cc.cond = sync.NewCond(&cc.mu)
cc.flow.add(int32(initialWindowSize))
@ -459,6 +521,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}

cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
@ -467,33 +531,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
return nil, cc.werr
}
// Read the obligatory SETTINGS frame
f, err := cc.fr.ReadFrame()
if err != nil {
return nil, err
}
sf, ok := f.(*SettingsFrame)
if !ok {
return nil, fmt.Errorf("expected settings frame, got: %T", f)
}
cc.fr.WriteSettingsAck()
cc.bw.Flush()
sf.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize:
cc.initialWindowSize = s.Val
default:
// TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
t.vlogf("Unhandled Setting: %v", s)
}
return nil
})
go cc.readLoop()
return cc, nil
}
@ -512,6 +549,15 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
if old != nil && old.ErrCode != ErrCodeNo {
cc.goAway.ErrCode = old.ErrCode
}
last := f.LastStreamID
for streamID, cs := range cc.streams {
if streamID > last {
select {
case cs.resc <- resAndError{err: errClientConnGotGoAway}:
default:
}
}
}
}

func (cc *ClientConn) CanTakeNewRequest() bool {
@ -526,7 +572,17 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
}
return cc.goAway == nil && !cc.closed &&
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
cc.nextStreamID < math.MaxInt32
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is more
// clear.
func (cc *ClientConn) onIdleTimeout() {
cc.closeIfIdle()
}

func (cc *ClientConn) closeIfIdle() {
@ -536,9 +592,13 @@ func (cc *ClientConn) closeIfIdle() {
return
}
cc.closed = true
nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()

if VerboseLogs {
cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
}
cc.tconn.Close()
}
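A hedged configuration sketch (ours): when this transport is installed with http2.ConfigureTransport, idleConnTimeout() mirrors the net/http Transport's IdleConnTimeout, so the idle timer above applies to HTTP/2 connections as well.

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{IdleConnTimeout: 90 * time.Second}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	// HTTP/2 connections created by this client are now closed by
	// onIdleTimeout/closeIfIdle after 90s without active streams.
	client := &http.Client{Transport: t1, Timeout: 10 * time.Second}
	_ = client
}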
@ -598,8 +658,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
}
if len(keys) > 0 {
sort.Strings(keys)
// TODO: could do better allocation-wise here, but trailers are rare,
// so being lazy for now.
return strings.Join(keys, ","), nil
}
return "", nil
@ -621,48 +679,37 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
// Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
}
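For illustration (a sketch of ours), the stricter checks now echo the offending value in the error, so a bad request fails before any frame is written:

req, _ := http.NewRequest("GET", "https://example.com/", nil)
req.Header.Set("Connection", "upgrade")
_, err := cc.RoundTrip(req)
// err: http2: invalid Connection request header: ["upgrade"]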
func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
body = req.Body
if body == nil {
return nil, 0
}
if req.ContentLength != 0 {
return req.Body, req.ContentLength
}

// We have a body but a zero content length. Test to see if
// it's actually zero or just unset.
var buf [1]byte
n, rerr := io.ReadFull(body, buf[:])
if rerr != nil && rerr != io.EOF {
return errorReader{rerr}, -1
}
if n == 1 {
// Oh, guess there is data in this Body Reader after all.
// The ContentLength field just wasn't set.
// Stitch the Body back together again, re-attaching our
// consumed byte.
return io.MultiReader(bytes.NewReader(buf[:]), body), -1
}
// Body is actually zero bytes.
return nil, 0
}

// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
if req.Body == nil {
return 0
}
if req.ContentLength != 0 {
return req.ContentLength
}
return -1
}
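A quick in-package illustration (ours) of the three cases the new helper distinguishes; http.NewRequest only sets ContentLength for body types it can measure, which is what makes the 0-vs-unknown split meaningful:

r1, _ := http.NewRequest("GET", "https://example.com/", nil)
_ = actualContentLength(r1) // 0: no body at all

r2, _ := http.NewRequest("POST", "https://example.com/", bytes.NewReader(make([]byte, 5)))
_ = actualContentLength(r2) // 5: NewRequest measured the *bytes.Reader

r3, _ := http.NewRequest("POST", "https://example.com/", ioutil.NopCloser(bytes.NewReader(make([]byte, 5))))
_ = actualContentLength(r3) // -1: body present but length unknown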
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
if err := checkConnHeaders(req); err != nil {
return nil, err
}
if cc.idleTimer != nil {
cc.idleTimer.Stop()
}

trailers, err := commaSeparatedTrailers(req)
if err != nil {
@ -670,9 +717,6 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
}
hasTrailers := trailers != ""

body, contentLen := bodyAndLength(req)
hasBody := body != nil

cc.mu.Lock()
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
@ -680,6 +724,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return nil, errClientConnUnusable
}
body := req.Body
hasBody := body != nil
contentLen := actualContentLength(req)
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool
if !cc.t.disableCompression() &&
@ -761,13 +809,20 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
if re.err == errClientConnGotGoAway {
cc.mu.Lock()
if cs.startedWrite {
re.err = errClientConnGotGoAwayAfterSomeReqBody
}
cc.mu.Unlock()
}
cc.forgetStreamID(cs.ID)
return nil, re.err
}
@ -923,10 +978,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
err = cc.fr.WriteData(cs.ID, sentEnd, data)
if err == nil {
// TODO(bradfitz): this flush is for latency, not bandwidth.
// Most requests won't need this. Make this opt-in or
// opt-out? Use some heuristic on the body type? Nagel-like
// timers? Based on 'n'? Only last chunk of this for loop,
// unless flow control tokens are low? For now, always.
// If we change this, see comment below.
err = cc.bw.Flush()
}
cc.wmu.Unlock()
@ -936,28 +992,33 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
}
}

cc.wmu.Lock()
if !sentEnd {
var trls []byte
if hasTrailers {
cc.mu.Lock()
trls = cc.encodeTrailers(req)
cc.mu.Unlock()
}
// Avoid forgetting to send an END_STREAM if the encoded
// trailers are 0 bytes. Both results produce an END_STREAM.
if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, trls)
} else {
err = cc.fr.WriteData(cs.ID, true, nil)
}
}
if ferr := cc.bw.Flush(); ferr != nil && err == nil {
err = ferr
}
cc.wmu.Unlock()
return err
}

if sentEnd {
// Already sent END_STREAM (which implies we have no
// trailers) and flushed, because currently all
// WriteData frames above get a flush. So we're done.
return nil
}

var trls []byte
if hasTrailers {
cc.mu.Lock()
defer cc.mu.Unlock()
trls = cc.encodeTrailers(req)
}

cc.wmu.Lock()
defer cc.wmu.Unlock()

// Two ways to send END_STREAM: either with trailers, or
// with an empty DATA frame.
if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, trls)
} else {
err = cc.fr.WriteData(cs.ID, true, nil)
}
if ferr := cc.bw.Flush(); ferr != nil && err == nil {
err = ferr
}
return err
}
@ -1010,6 +1071,26 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if host == "" { if host == "" {
host = req.URL.Host host = req.URL.Host
} }
host, err := httplex.PunycodeHostPort(host)
if err != nil {
return nil, err
}
var path string
if req.Method != "CONNECT" {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers and return an error before we
// potentially pollute our hpack state. (We want to be able to
@ -1033,8 +1114,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
cc.writeHeader(":authority", host)
cc.writeHeader(":method", req.Method)
if req.Method != "CONNECT" {
cc.writeHeader(":path", path)
cc.writeHeader(":scheme", req.URL.Scheme)
}
if trailers != "" {
cc.writeHeader("trailer", trailers)
@ -1161,6 +1242,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
if andRemove && cs != nil && !cc.closed {
cc.lastActive = time.Now()
delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
}
close(cs.done)
cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
@ -1203,27 +1287,37 @@ func (e GoAwayError) Error() string {
e.LastStreamID, e.ErrCode, e.DebugData)
}
func isEOFOrNetReadError(err error) bool {
if err == io.EOF {
return true
}
ne, ok := err.(*net.OpError)
return ok && ne.Op == "read"
}
func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc
defer cc.tconn.Close()
defer cc.t.connPool().MarkDead(cc)
defer close(cc.readerDone)

if cc.idleTimer != nil {
cc.idleTimer.Stop()
}

// Close any response bodies if the server closes prematurely.
// TODO: also do this if we've written the headers but not
// gotten a response yet.
err := cc.readerErr
cc.mu.Lock()
if cc.goAway != nil && isEOFOrNetReadError(err) {
err = GoAwayError{
LastStreamID: cc.goAway.LastStreamID,
ErrCode: cc.goAway.ErrCode,
DebugData: cc.goAwayDebug,
}
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
for _, cs := range rl.activeRes {
cs.bufPipe.CloseWithError(err)
@ -1243,15 +1337,20 @@ func (rl *clientConnReadLoop) cleanup() {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a HEADERS reply
gotSettings := false
for {
f, err := cc.fr.ReadFrame()
if err != nil {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
cs.cc.writeStreamReset(cs.ID, se.Code, err)
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
rl.endStreamError(cs, se)
}
continue
} else if err != nil {
@ -1260,6 +1359,13 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport received %s", summarizeFrame(f))
}
if !gotSettings {
if _, ok := f.(*SettingsFrame); !ok {
cc.logf("protocol error: received %T before a SETTINGS frame", f)
return ConnectionError(ErrCodeProtocol)
}
gotSettings = true
}
maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) {
@ -1288,6 +1394,9 @@ func (rl *clientConnReadLoop) run() error {
cc.logf("Transport: unhandled response frame type %T", f)
}
if err != nil {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
return err
}
if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
@ -1419,8 +1528,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return res, nil
}

buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
cs.bufPipe = pipe{b: buf}
cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs}
go cs.awaitRequestCancel(cs.req)
@ -1581,16 +1689,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// by the peer? Tough without accumulating too much state.
// But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
cc.inflow.add(int32(f.Length))
cc.mu.Unlock()

cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(f.Length))
cc.bw.Flush()
cc.wmu.Unlock()
}
return nil
}

if f.Length > 0 {
if len(data) > 0 && cs.bufPipe.b == nil {
// Data frame after it's already closed?
cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
return ConnectionError(ErrCodeProtocol)
@ -1598,17 +1710,31 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// Check connection-level flow control.
cc.mu.Lock()
if cs.inflow.available() >= int32(f.Length) {
cs.inflow.take(int32(f.Length))
} else {
cc.mu.Unlock()
return ConnectionError(ErrCodeFlowControl)
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
cs.inflow.add(pad)
cc.inflow.add(pad)
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(pad))
cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
cc.bw.Flush()
cc.wmu.Unlock()
}
didReset := cs.didReset
cc.mu.Unlock()

if len(data) > 0 && !didReset {
if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err)
return err
}
}
}
@ -1637,6 +1763,11 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
if isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true
}
select {
case cs.resc <- resAndError{err: err}:
default:
}
}
func (cs *clientStream) copyTrailers() {
@ -1664,18 +1795,39 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
if f.IsAck() {
if cc.wantSettingsAck {
cc.wantSettingsAck = false
return nil
}
return ConnectionError(ErrCodeProtocol)
}

err := f.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize:
// Values above the maximum flow-control
// window size of 2^31-1 MUST be treated as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
if s.Val > math.MaxInt32 {
return ConnectionError(ErrCodeFlowControl)
}

// Adjust flow control of currently-open
// frames by the difference of the old initial
// window size and this one.
delta := int32(s.Val) - int32(cc.initialWindowSize)
for _, cs := range cc.streams {
cs.flow.add(delta)
}
cc.cond.Broadcast()

cc.initialWindowSize = s.Val
default:
// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
@ -1683,6 +1835,16 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
}
return nil
})
if err != nil {
return err
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
cc.fr.WriteSettingsAck()
cc.bw.Flush()
return cc.werr
}

func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
@ -1719,7 +1881,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
// which closes this, so there
// isn't a race.
default:
err := streamError(cs.ID, f.ErrCode)
cs.resetErr = err
close(cs.peerReset)
cs.bufPipe.CloseWithError(err)
@ -1729,10 +1891,56 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
return nil
}
// Ping sends a PING frame to the server and waits for the ack.
// Public implementation is in go17.go and not_go17.go
func (cc *ClientConn) ping(ctx contextContext) error {
c := make(chan struct{})
// Generate a random payload
var p [8]byte
for {
if _, err := rand.Read(p[:]); err != nil {
return err
}
cc.mu.Lock()
// check for dup before insert
if _, found := cc.pings[p]; !found {
cc.pings[p] = c
cc.mu.Unlock()
break
}
cc.mu.Unlock()
}
cc.wmu.Lock()
if err := cc.fr.WritePing(false, p); err != nil {
cc.wmu.Unlock()
return err
}
if err := cc.bw.Flush(); err != nil {
cc.wmu.Unlock()
return err
}
cc.wmu.Unlock()
select {
case <-c:
return nil
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
// connection closed
return cc.readerErr
}
}
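An in-package usage sketch (ours, e.g. from a health check): cc.ping is unexported, and contextContext is the package's compatibility alias that is simply context.Context on Go 1.7+.

// pingAlive reports whether the server answers a PING within 5 seconds.
func pingAlive(cc *ClientConn) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return cc.ping(ctx)
}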
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
if c, ok := cc.pings[f.Data]; ok {
close(c)
delete(cc.pings, f.Data)
}
return nil
}
cc := rl.cc
@ -1854,6 +2062,9 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body
resc := make(chan error, 1)
s.resc = resc
s.fn = func() {
cs.cc.mu.Lock()
cs.startedWrite = true
cs.cc.mu.Unlock()
resc <- cs.writeRequestBody(body, cs.req.Body)
}
s.delay = t.expectContinueTimeout()


@ -9,6 +9,7 @@ import (
"fmt" "fmt"
"log" "log"
"net/http" "net/http"
"net/url"
"time" "time"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
@ -18,6 +19,11 @@ import (
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
writeFrame(writeContext) error

// staysWithinBuffer reports whether this writer promises that
// it will only write less than or equal to size bytes, and it
// won't Flush the write context.
staysWithinBuffer(size int) bool
}

// writeContext is the interface needed by the various frame writer
@ -39,9 +45,10 @@ type writeContext interface {
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}

// writeEndsStream reports whether w writes a frame that will transition
// the stream to a half-closed local state. This returns false for RST_STREAM,
// which closes the entire stream (not just the local half).
func writeEndsStream(w writeFramer) bool {
switch v := w.(type) {
case *writeData:
return v.endStream
@ -51,7 +58,7 @@ func endsStream(w writeFramer) bool {
// This can only happen if the caller reuses w after it's
// been intentionally nil'ed out to prevent use. Keep this
// here to catch future refactoring breaking it.
panic("writeEndsStream called on nil writeFramer")
}
return false
}
@ -62,8 +69,16 @@ func (flushFrameWriter) writeFrame(ctx writeContext) error {
return ctx.Flush()
}

func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }

type writeSettings []Setting
func (s writeSettings) staysWithinBuffer(max int) bool {
const settingSize = 6 // uint16 + uint32
return frameHeaderLen+settingSize*len(s) <= max
}
func (s writeSettings) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettings([]Setting(s)...)
}
@ -83,6 +98,8 @@ func (p *writeGoAway) writeFrame(ctx writeContext) error {
return err
}
func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
type writeData struct {
streamID uint32
p []byte
@ -97,6 +114,10 @@ func (w *writeData) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}
func (w *writeData) staysWithinBuffer(max int) bool {
return frameHeaderLen+len(w.p) <= max
}
// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type handlerPanicRST struct {
@ -107,22 +128,57 @@ func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}

func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

func (se StreamError) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}

func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

type writePingAck struct{ pf *PingFrame }

func (w writePingAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WritePing(true, w.pf.Data)
}

func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }

type writeSettingsAck struct{}

func (writeSettingsAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettingsAck()
}
func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
// for the first/last fragment, respectively.
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
// that all peers must support (16KB). Later we could care
// more and send larger frames if the peer advertised it, but
// there's little point. Most headers are small anyway (so we
// generally won't have CONTINUATION frames), and extra frames
// only waste 9 bytes anyway.
const maxFrameSize = 16384
first := true
for len(headerBlock) > 0 {
frag := headerBlock
if len(frag) > maxFrameSize {
frag = frag[:maxFrameSize]
}
headerBlock = headerBlock[len(frag):]
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
return err
}
first = false
}
return nil
}
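To make the arithmetic concrete, a sketch of ours (usable only inside this package, with a writeContext in scope): a 40,000-byte header block under the fixed 16KB cap yields one HEADERS fragment and two CONTINUATION fragments.

var sizes []int
_ = splitHeaderBlock(ctx, make([]byte, 40000),
	func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
		sizes = append(sizes, len(frag)) // records 16384, 16384, 7232
		return nil
	})
// firstFrag is true only for the first fragment, lastFrag only for the last.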
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
@ -144,6 +200,17 @@ func encKV(enc *hpack.Encoder, k, v string) {
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
// TODO: this is a common one. It'd be nice to return true
// here and get into the fast path if we could be clever and
// calculate the size fast enough, or at least a conservative
// upper bound that usually fires. (Maybe if w.h and
// w.trailers are nil, so we don't need to enumerate it.)
// Otherwise I'm afraid that just calculating the length to
// answer this question would be slower than the ~2µs benefit.
return false
}
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
@ -169,39 +236,69 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error {
panic("unexpected empty hpack")
}
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
// that all peers must support (16KB). Later we could care
// more and send larger frames if the peer advertised it, but
// there's little point. Most headers are small anyway (so we
// generally won't have CONTINUATION frames), and extra frames
// only waste 9 bytes anyway.
const maxFrameSize = 16384

first := true
for len(headerBlock) > 0 {
frag := headerBlock
if len(frag) > maxFrameSize {
frag = frag[:maxFrameSize]
}
headerBlock = headerBlock[len(frag):]
endHeaders := len(headerBlock) == 0
var err error
if first {
first = false
err = ctx.Framer().WriteHeaders(HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: frag,
EndStream: w.endStream,
EndHeaders: endHeaders,
})
} else {
err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
}
if err != nil {
return err
}
}
return nil
}

return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}

func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WriteHeaders(HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: frag,
EndStream: w.endStream,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}

// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
type writePushPromise struct {
streamID uint32 // pusher stream
method string // for :method
url *url.URL // for :scheme, :authority, :path
h http.Header

// Creates an ID for a pushed stream. This runs on serveG just before
// the frame is written. The returned ID is copied to promisedID.
allocatePromisedID func() (uint32, error)
promisedID uint32
}

func (w *writePushPromise) staysWithinBuffer(max int) bool {
// TODO: see writeResHeaders.staysWithinBuffer
return false
}

func (w *writePushPromise) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()

encKV(enc, ":method", w.method)
encKV(enc, ":scheme", w.url.Scheme)
encKV(enc, ":authority", w.url.Host)
encKV(enc, ":path", w.url.RequestURI())
encodeHeaders(enc, w.h, nil)

headerBlock := buf.Bytes()
if len(headerBlock) == 0 {
panic("unexpected empty hpack")
}

return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}

func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WritePushPromise(PushPromiseParam{
StreamID: w.streamID,
PromiseID: w.promisedID,
BlockFragment: frag,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}
type write100ContinueHeadersFrame struct {
@ -220,15 +317,24 @@ func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
})
}
func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
// Sloppy but conservative:
return 9+2*(len(":status")+len("100")) <= max
}
type writeWindowUpdate struct {
streamID uint32 // or 0 for conn-level
n uint32
}
func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
// is encoded only if k is in keys.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
if keys == nil {
sorter := sorterPool.Get().(*sorter)


@ -6,14 +6,53 @@ package http2
import "fmt" import "fmt"
// frameWriteMsg is a request to write a frame.
type frameWriteMsg struct {
// write is the interface value that does the writing, once the
// writeScheduler (below) has decided to select this frame
// to write. The write functions are all defined in write.go.
write writeFramer

stream *stream // used for prioritization. nil for non-stream frames.

// done, if non-nil, must be a buffered channel with space for
// 1 message and is sent the return value from write (or an
done chan error
}
@ -21,263 +60,183 @@
// for debugging only:
func (wm frameWriteMsg) String() string {
var streamID uint32
if wm.stream != nil {
streamID = wm.stream.id
}
var des string
if s, ok := wm.write.(fmt.Stringer); ok {
des = s.String()
} else {
des = fmt.Sprintf("%T", wm.write)
}
return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
}

// writeScheduler tracks pending frames to write, priorities, and decides
// the next one to use. It is not thread-safe.
type writeScheduler struct {
// zero are frames not associated with a specific stream.
// They're sent before any stream-specific frames.
zero writeQueue

// maxFrameSize is the maximum size of a DATA frame
// we'll write. Must be non-zero and between 16K-16M.
maxFrameSize uint32

// sq contains the stream-specific queues, keyed by stream ID.
// when a stream is idle, it's deleted from the map.
sq map[uint32]*writeQueue

// canSend is a slice of memory that's reused between frame
// scheduling decisions to hold the list of writeQueues (from sq)
// which have enough flow control data to send. After canSend is
// built, the best is selected.
canSend []*writeQueue

// pool of empty queues for reuse.
queuePool []*writeQueue
}

func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
if len(q.s) != 0 {
panic("queue must be empty")
}
ws.queuePool = append(ws.queuePool, q)
}

func (ws *writeScheduler) getEmptyQueue() *writeQueue {
ln := len(ws.queuePool)
if ln == 0 {
return new(writeQueue)
}
q := ws.queuePool[ln-1]
ws.queuePool = ws.queuePool[:ln-1]
return q
}

func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }

func (ws *writeScheduler) add(wm frameWriteMsg) {
st := wm.stream
if st == nil {
ws.zero.push(wm)
} else {
ws.streamQueue(st.id).push(wm)
}
}

func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
if q, ok := ws.sq[streamID]; ok {
return q
}
if ws.sq == nil {
ws.sq = make(map[uint32]*writeQueue)
}
q := ws.getEmptyQueue()
ws.sq[streamID] = q
return q
}

// take returns the most important frame to write and removes it from the scheduler.
// It is illegal to call this if the scheduler is empty or if there are no connection-level
// flow control bytes available.
func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
if ws.maxFrameSize == 0 {
panic("internal error: ws.maxFrameSize not initialized or invalid")
}

// If there any frames not associated with streams, prefer those first.
// These are usually SETTINGS, etc.
if !ws.zero.empty() {
return ws.zero.shift(), true
}
if len(ws.sq) == 0 {
return
}

// Next, prioritize frames on streams that aren't DATA frames (no cost).
for id, q := range ws.sq {
if q.firstIsNoCost() {
return ws.takeFrom(id, q)
}
}

// Now, all that remains are DATA frames with non-zero bytes to
// send. So pick the best one.
if len(ws.canSend) != 0 {
panic("should be empty")
}
for _, q := range ws.sq {
if n := ws.streamWritableBytes(q); n > 0 {
ws.canSend = append(ws.canSend, q)
}
}
if len(ws.canSend) == 0 {
return
}
defer ws.zeroCanSend()

// TODO: find the best queue
q := ws.canSend[0]
return ws.takeFrom(q.streamID(), q)
}

// zeroCanSend is deferred from take.
func (ws *writeScheduler) zeroCanSend() {
for i := range ws.canSend {
ws.canSend[i] = nil
}
ws.canSend = ws.canSend[:0]
}

// streamWritableBytes returns the number of DATA bytes we could write
// from the given queue's stream, if this stream/queue were
// selected. It is an error to call this if q's head isn't a
// *writeData.
func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
wm := q.head()
ret := wm.stream.flow.available() // max we can write
if ret == 0 {
return 0
}
if int32(ws.maxFrameSize) < ret {
ret = int32(ws.maxFrameSize)
}
if ret == 0 {
panic("internal error: ws.maxFrameSize not initialized or invalid")
}
wd := wm.write.(*writeData)
if len(wd.p) < int(ret) {
ret = int32(len(wd.p))
}
return ret
}

func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
wm = q.head()
// If the first item in this queue costs flow control tokens
// and we don't have enough, write as much as we can.
if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
allowed := wm.stream.flow.available() // max we can write
if allowed == 0 {
// No quota available. Caller can try the next stream.
return frameWriteMsg{}, false
}
if int32(ws.maxFrameSize) < allowed {
allowed = int32(ws.maxFrameSize)
}
// TODO: further restrict the allowed size, because even if
// the peer says it's okay to write 16MB data frames, we might
// want to write smaller ones to properly weight competing
// streams' priorities.
if len(wd.p) > int(allowed) {
wm.stream.flow.take(allowed)
chunk := wd.p[:allowed]
wd.p = wd.p[allowed:]
// Make up a new write message of a valid size, rather
// than shifting one off the queue.
return frameWriteMsg{
stream: wm.stream,
write: &writeData{
streamID: wd.streamID,
p: chunk,
// even if the original had endStream set, there
// are bytes remaining because len(wd.p) > allowed,
// so we know endStream is false:
endStream: false,
},
// our caller is blocking on the final DATA frame, not
// these intermediates, so no need to wait:
done: nil,
}, true
}
wm.stream.flow.take(int32(len(wd.p)))
}

q.shift()
if q.empty() {
ws.putEmptyQueue(q)
delete(ws.sq, id)
}
return wm, true
}

func (ws *writeScheduler) forgetStream(id uint32) {
q, ok := ws.sq[id]
if !ok {
return
}
delete(ws.sq, id)

// But keep it for others later.
for i := range q.s {
q.s[i] = frameWriteMsg{}
}
q.s = q.s[:0]
ws.putEmptyQueue(q)
}

type writeQueue struct {
s []frameWriteMsg
}

// streamID returns the stream ID for a non-empty stream-specific queue.
func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }

func (q *writeQueue) empty() bool { return len(q.s) == 0 }

func (q *writeQueue) push(wm frameWriteMsg) {
q.s = append(q.s, wm)
}

// head returns the next item that would be removed by shift.
func (q *writeQueue) head() frameWriteMsg {
if len(q.s) == 0 {
panic("invalid use of queue")
}
return q.s[0]
}

func (q *writeQueue) shift() frameWriteMsg {
if len(q.s) == 0 {
panic("invalid use of queue")
}
wm := q.s[0]
// TODO: less copy-happy queue.
copy(q.s, q.s[1:])
q.s[len(q.s)-1] = frameWriteMsg{}
q.s = q.s[:len(q.s)-1]
return wm
}

func (q *writeQueue) firstIsNoCost() bool {
if df, ok := q.s[0].write.(*writeData); ok {
return len(df.p) == 0
}
return true
}

// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
// Methods are never called concurrently.
type WriteScheduler interface {
// OpenStream opens a new stream in the write scheduler.
// It is illegal to call this with streamID=0 or with a streamID that is
// already open -- the call may panic.
OpenStream(streamID uint32, options OpenStreamOptions)

// CloseStream closes a stream in the write scheduler. Any frames queued on
// this stream should be discarded. It is illegal to call this on a stream
// that is not open -- the call may panic.
CloseStream(streamID uint32)

// AdjustStream adjusts the priority of the given stream. This may be called
// on a stream that has not yet been opened or has been closed. Note that
// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
// https://tools.ietf.org/html/rfc7540#section-5.1
AdjustStream(streamID uint32, priority PriorityParam)

// Push queues a frame in the scheduler. In most cases, this will not be
// called with wr.StreamID()!=0 unless that stream is currently open. The one
// exception is RST_STREAM frames, which may be sent on idle or closed streams.
Push(wr FrameWriteRequest)

// Pop dequeues the next frame to write. Returns false if no frames can
// be written. Frames with a given wr.StreamID() are Pop'd in the same
// order they are Push'd.
Pop() (wr FrameWriteRequest, ok bool)
}

// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
}

// FrameWriteRequest is a request to write a frame.
type FrameWriteRequest struct {
// write is the interface value that does the writing, once the
// WriteScheduler has selected this frame to write. The write
// functions are all defined in write.go.
write writeFramer

// stream is the stream on which this frame will be written.
// nil for non-stream frames like PING and SETTINGS.
stream *stream

// done, if non-nil, must be a buffered channel with space for
// 1 message and is sent the return value from write (or an
done chan error
}

// StreamID returns the id of the stream this frame will be written to.
// 0 is used for non-stream frames such as PING and SETTINGS.
func (wr FrameWriteRequest) StreamID() uint32 {
if wr.stream == nil {
if se, ok := wr.write.(StreamError); ok {
// (*serverConn).resetStream doesn't set
// stream because it doesn't necessarily have
// one. So special case this type of write
// message.
return se.StreamID
}
return 0
}
return wr.stream.id
}

// DataSize returns the number of flow control bytes that must be consumed
// to write this entire frame. This is 0 for non-DATA frames.
func (wr FrameWriteRequest) DataSize() int {
if wd, ok := wr.write.(*writeData); ok {
return len(wd.p)
}
return 0
}

// Consume consumes min(n, available) bytes from this frame, where available
// is the number of flow control bytes available on the stream. Consume returns
// 0, 1, or 2 frames, where the integer return value gives the number of frames
// returned.
//
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
// underlying stream's flow control budget.
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
var empty FrameWriteRequest

// Non-DATA frames are always consumed whole.
wd, ok := wr.write.(*writeData)
if !ok || len(wd.p) == 0 {
return wr, empty, 1
}

// Might need to split after applying limits.
allowed := wr.stream.flow.available()
if n < allowed {
allowed = n
}
if wr.stream.sc.maxFrameSize < allowed {
allowed = wr.stream.sc.maxFrameSize
}
if allowed <= 0 {
return empty, empty, 0
}
if len(wd.p) > int(allowed) {
wr.stream.flow.take(allowed)
consumed := FrameWriteRequest{
stream: wr.stream,
write: &writeData{
streamID: wd.streamID,
p: wd.p[:allowed],
// Even if the original had endStream set, there
// are bytes remaining because len(wd.p) > allowed,
// so we know endStream is false.
endStream: false,
},
// Our caller is blocking on the final DATA frame, not
// this intermediate frame, so no need to wait.
done: nil,
}
rest := FrameWriteRequest{
stream: wr.stream,
write: &writeData{
streamID: wd.streamID,
p: wd.p[allowed:],
endStream: wd.endStream,
},
done: wr.done,
}
return consumed, rest, 2
}

// The frame is consumed whole.
// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
wr.stream.flow.take(int32(len(wd.p)))
return wr, empty, 1
}
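A worked example of the split case (numbers ours): a queued 30,000-byte DATA frame, a stream flow window of 20,000 bytes, and a call Consume(10000) with a 16384-byte max frame size.

// allowed = min(n, flow.available(), sc.maxFrameSize)
//         = min(10000, 20000, 16384) = 10000
// consumed: writeData with the first 10,000 bytes, endStream=false, done=nil
// rest:     writeData with the remaining 20,000 bytes, original endStream/done
// returns (consumed, rest, 2); the stream's flow window drops to 10,000.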
// String is for debugging only.
func (wr FrameWriteRequest) String() string {
var des string
if s, ok := wr.write.(fmt.Stringer); ok {
des = s.String()
} else {
des = fmt.Sprintf("%T", wr.write)
}
return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
}

// replyToWriter sends err to wr.done and panics if the send must block
// This does nothing if wr.done is nil.
func (wr *FrameWriteRequest) replyToWriter(err error) {
if wr.done == nil {
return
}
select {
case wr.done <- err:
default:
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
}
wr.write = nil // prevent use (assume it's tainted after wr.done send)
}

// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
s []FrameWriteRequest
}

func (q *writeQueue) empty() bool { return len(q.s) == 0 }

func (q *writeQueue) push(wr FrameWriteRequest) {
q.s = append(q.s, wr)
}

func (q *writeQueue) shift() FrameWriteRequest {
if len(q.s) == 0 {
panic("invalid use of queue")
}
wr := q.s[0]
// TODO: less copy-happy queue.
copy(q.s, q.s[1:])
q.s[len(q.s)-1] = FrameWriteRequest{}
q.s = q.s[:len(q.s)-1]
return wr
}

// consume consumes up to n bytes from q.s[0]. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
if len(q.s) == 0 {
return FrameWriteRequest{}, false
}
consumed, rest, numresult := q.s[0].Consume(n)
switch numresult {
case 0:
return FrameWriteRequest{}, false
case 1:
q.shift()
case 2:
q.s[0] = rest
}
return consumed, true
}

type writeQueuePool []*writeQueue

// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
for i := range q.s {
q.s[i] = FrameWriteRequest{}
}
q.s = q.s[:0]
*p = append(*p, q)
}

// get returns an empty writeQueue.
func (p *writeQueuePool) get() *writeQueue {
ln := len(*p)
if ln == 0 {
return new(writeQueue)
}
x := ln - 1
q := (*p)[x]
(*p)[x] = nil
*p = (*p)[:x]
return q
}
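To illustrate the WriteScheduler contract, a minimal sketch of ours (not part of this change) that ignores priorities: control frames first, then stream frames strictly FIFO. A production scheduler would also discard a stream's queued frames in CloseStream.

type fifoWriteScheduler struct {
	control writeQueue // frames with StreamID() == 0
	data    writeQueue // everything else, strictly FIFO
}

func (ws *fifoWriteScheduler) OpenStream(streamID uint32, _ OpenStreamOptions) {}
func (ws *fifoWriteScheduler) CloseStream(streamID uint32)                     {}
func (ws *fifoWriteScheduler) AdjustStream(streamID uint32, _ PriorityParam)   {}

func (ws *fifoWriteScheduler) Push(wr FrameWriteRequest) {
	if wr.StreamID() == 0 {
		ws.control.push(wr)
	} else {
		ws.data.push(wr)
	}
}

func (ws *fifoWriteScheduler) Pop() (FrameWriteRequest, bool) {
	if !ws.control.empty() {
		return ws.control.shift(), true
	}
	// math.MaxInt32: let stream flow control, not the scheduler,
	// decide how much of a DATA frame is writable right now.
	if wr, ok := ws.data.consume(math.MaxInt32); ok {
		return wr, true
	}
	return FrameWriteRequest{}, false
}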

vendor/golang.org/x/net/http2/writesched_priority.go generated vendored Normal file

@ -0,0 +1,452 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
"sort"
)
// RFC 7540, Section 5.3.5: the default weight is 16.
const priorityDefaultWeight = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
// MaxClosedNodesInTree controls the maximum number of closed streams to
// retain in the priority tree. Setting this to zero saves a small amount
// of memory at the cost of performance.
//
// See RFC 7540, Section 5.3.4:
// "It is possible for a stream to become closed while prioritization
// information ... is in transit. ... This potentially creates suboptimal
// prioritization, since the stream could be given a priority that is
// different from what is intended. To avoid these problems, an endpoint
// SHOULD retain stream prioritization state for a period after streams
// become closed. The longer state is retained, the lower the chance that
// streams are assigned incorrect or default priority values."
MaxClosedNodesInTree int
// MaxIdleNodesInTree controls the maximum number of idle streams to
// retain in the priority tree. Setting this to zero saves a small amount
// of memory at the cost of performance.
//
// See RFC 7540, Section 5.3.4:
// Similarly, streams that are in the "idle" state can be assigned
// priority or become a parent of other streams. This allows for the
// creation of a grouping node in the dependency tree, which enables
// more flexible expressions of priority. Idle streams begin with a
// default priority (Section 5.3.5).
MaxIdleNodesInTree int
// ThrottleOutOfOrderWrites enables write throttling to help ensure that
// data is delivered in priority order. This works around a race where
// stream B depends on stream A and both streams are about to call Write
// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
// write as much data from B as possible, but this is suboptimal because A
// is a higher-priority stream. With throttling enabled, we write a small
// amount of data from B to minimize the amount of bandwidth that B can
// steal from A.
ThrottleOutOfOrderWrites bool
}
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
// frames by following HTTP/2 priorities as described in RFC 7540, Section 5.3.
// If cfg is nil, default options are used.
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
if cfg == nil {
// For justification of these defaults, see:
// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
cfg = &PriorityWriteSchedulerConfig{
MaxClosedNodesInTree: 10,
MaxIdleNodesInTree: 10,
ThrottleOutOfOrderWrites: false,
}
}
ws := &priorityWriteScheduler{
nodes: make(map[uint32]*priorityNode),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
}
ws.nodes[0] = &ws.root
if cfg.ThrottleOutOfOrderWrites {
ws.writeThrottleLimit = 1024
} else {
ws.writeThrottleLimit = math.MaxInt32
}
return ws
}
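A hedged wiring example (ours): this version of the package exposes a NewWriteScheduler hook on http2.Server, so a server could opt into a deeper closed-node history like so.

mux := http.NewServeMux()
srv := &http.Server{Addr: ":8443", Handler: mux}
err := http2.ConfigureServer(srv, &http2.Server{
	NewWriteScheduler: func() http2.WriteScheduler {
		return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
			MaxClosedNodesInTree:     100, // remember more closed streams than the default 10
			MaxIdleNodesInTree:       10,
			ThrottleOutOfOrderWrites: true,
		})
	},
})
if err != nil {
	log.Fatal(err)
}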
type priorityNodeState int
const (
priorityNodeOpen priorityNodeState = iota
priorityNodeClosed
priorityNodeIdle
)
// priorityNode is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
type priorityNode struct {
q writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state priorityNodeState // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
parent *priorityNode
kids *priorityNode // start of the kids list
prev, next *priorityNode // doubly-linked list of siblings
}
func (n *priorityNode) setParent(parent *priorityNode) {
if n == parent {
panic("setParent to self")
}
if n.parent == parent {
return
}
// Unlink from current parent.
if parent := n.parent; parent != nil {
if n.prev == nil {
parent.kids = n.next
} else {
n.prev.next = n.next
}
if n.next != nil {
n.next.prev = n.prev
}
}
// Link to new parent.
// If parent=nil, remove n from the tree.
// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
n.parent = parent
if parent == nil {
n.next = nil
n.prev = nil
} else {
n.next = parent.kids
n.prev = nil
if n.next != nil {
n.next.prev = n
}
parent.kids = n
}
}
func (n *priorityNode) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
}
}
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
// with a non-empty write queue. When f returns true, this funcion returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
if n.kids == nil {
return false
}
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
openParent = openParent || (n.state == priorityNodeOpen)
}
// Common case: only one kid or all kids have the same weight.
// Some clients don't use weights; other clients (like web browsers)
// use mostly-linear priority trees.
w := n.kids.weight
needSort := false
for k := n.kids.next; k != nil; k = k.next {
if k.weight != w {
needSort = true
break
}
}
if !needSort {
for k := n.kids; k != nil; k = k.next {
if k.walkReadyInOrder(openParent, tmp, f) {
return true
}
}
return false
}
// Uncommon case: sort the child nodes. We remove the kids from the parent,
// then re-insert after sorting so we can reuse tmp for future sort calls.
*tmp = (*tmp)[:0]
for n.kids != nil {
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
sort.Sort(sortPriorityNodeSiblings(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
for k := n.kids; k != nil; k = k.next {
if k.walkReadyInOrder(openParent, tmp, f) {
return true
}
}
return false
}
type sortPriorityNodeSiblings []*priorityNode
func (z sortPriorityNodeSiblings) Len() int { return len(z) }
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
return wi >= wk
}
if bk == 0 {
return false
}
return bi/bk <= wi/wk
}
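A worked comparison (numbers ours) for the bytes-per-weight rule above:

// A: weight 255 (actual 256), subtreeBytes 4096 -> 4096/256 = 16 bytes per weight unit
// B: weight 63  (actual 64),  subtreeBytes  512 ->  512/64  =  8 bytes per weight unit
// Less(A, B): bi/bk = 4096/512 = 8; wi/wk = 256/64 = 4; 8 <= 4 is false,
// so B sorts ahead of A: it has sent less data relative to its weight.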
type priorityWriteScheduler struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
root priorityNode
// nodes maps stream ids to priority tree nodes.
nodes map[uint32]*priorityNode
// maxID is the maximum stream id in nodes.
maxID uint32
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
closedNodes, idleNodes []*priorityNode
// From the config.
maxClosedNodesInTree int
maxIdleNodesInTree int
writeThrottleLimit int32
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
tmp []*priorityNode
// pool of empty queues for reuse.
queuePool writeQueuePool
}
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
if curr.state != priorityNodeIdle {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
curr.state = priorityNodeOpen
return
}
// RFC 7540, Section 5.3.5:
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
// Pushed streams initially depend on their associated stream. In both cases,
// streams are assigned a default weight of 16."
parent := ws.nodes[options.PusherID]
if parent == nil {
parent = &ws.root
}
n := &priorityNode{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeOpen,
}
n.setParent(parent)
ws.nodes[streamID] = n
if streamID > ws.maxID {
ws.maxID = streamID
}
}
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
if ws.nodes[streamID].state != priorityNodeOpen {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
n.state = priorityNodeClosed
n.addBytes(-n.bytes)
q := n.q
ws.queuePool.put(&q)
n.q.s = nil
if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else {
ws.removeNode(n)
}
}
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
// If streamID does not exist, there are two cases:
// - A closed stream that has been removed (this will have ID <= maxID)
// - An idle stream that is being used for "grouping" (this will have ID > maxID)
n := ws.nodes[streamID]
if n == nil {
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
return
}
ws.maxID = streamID
n = &priorityNode{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeIdle,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
}
// Section 5.3.1: A dependency on a stream that is not currently in the tree
// results in that stream being given a default priority (Section 5.3.5).
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
n.weight = priorityDefaultWeight
return
}
// Ignore if the client tries to make a node its own parent.
if n == parent {
return
}
// Section 5.3.3:
// "If a stream is made dependent on one of its own dependencies, the
// formerly dependent stream is first moved to be dependent on the
// reprioritized stream's previous parent. The moved dependency retains
// its weight."
//
// That is: if parent depends on n, move parent to depend on n.parent.
for x := parent.parent; x != nil; x = x.parent {
if x == n {
parent.setParent(n.parent)
break
}
}
// Section 5.3.3: The exclusive flag causes the stream to become the sole
// dependency of its parent stream, causing other dependencies to become
// dependent on the exclusive stream.
if priority.Exclusive {
k := parent.kids
for k != nil {
next := k.next
if k != n {
k.setParent(n)
}
k = next
}
}
n.setParent(parent)
n.weight = priority.Weight
}
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
var n *priorityNode
if id := wr.StreamID(); id == 0 {
n = &ws.root
} else {
n = ws.nodes[id]
if n == nil {
// id is an idle or closed stream. wr should not be a HEADERS or
// DATA frame. However, wr can be a RST_STREAM. In this case, we
// push wr onto the root, rather than creating a new priorityNode,
// since RST_STREAM is tiny and the stream's priority is unknown
// anyway. See issue #17919.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
n = &ws.root
}
}
n.q.push(wr)
}
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
}
wr, ok = n.q.consume(limit)
if !ok {
return false
}
n.addBytes(int64(wr.DataSize()))
// If B depends on A and B continuously has data available but A
// does not, gradually increase the throttling limit to allow B to
// steal more and more bandwidth from A.
if openParent {
ws.writeThrottleLimit += 1024
if ws.writeThrottleLimit < 0 {
ws.writeThrottleLimit = math.MaxInt32
}
} else if ws.enableWriteThrottle {
ws.writeThrottleLimit = 1024
}
return true
})
return wr, ok
}
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
if maxSize == 0 {
return
}
if len(*list) == maxSize {
// Remove the oldest node, then shift left.
ws.removeNode((*list)[0])
x := (*list)[1:]
copy(*list, x)
*list = (*list)[:len(x)]
}
*list = append(*list, n)
}
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
for k := n.kids; k != nil; k = k.next {
k.setParent(n.parent)
}
n.setParent(nil)
delete(ws.nodes, n.id)
}
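// Illustrative configuration sketch (not part of the generated file; srv and
// mux are hypothetical): a server opts into this scheduler via the exported
// http2.Server hook, and NewPriorityWriteScheduler(nil) uses the package
// defaults for the limits above.
//
//	srv := &http.Server{Addr: ":8443", Handler: mux}
//	http2.ConfigureServer(srv, &http2.Server{
//		NewWriteScheduler: func() http2.WriteScheduler {
//			return http2.NewPriorityWriteScheduler(nil)
//		},
//	})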

72
vendor/golang.org/x/net/http2/writesched_random.go generated vendored Normal file

@ -0,0 +1,72 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "math"
// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
// priorities. Control frames like SETTINGS and PING are written before DATA
// frames, but if no control frames are queued and multiple streams have queued
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
func NewRandomWriteScheduler() WriteScheduler {
return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
}
type randomWriteScheduler struct {
// zero holds frames not associated with a specific stream.
zero writeQueue
// sq contains the stream-specific queues, keyed by stream ID.
// When a stream is idle or closed, it's deleted from the map.
sq map[uint32]*writeQueue
// pool of empty queues for reuse.
queuePool writeQueuePool
}
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
// no-op: idle streams are not tracked
}
func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
q, ok := ws.sq[streamID]
if !ok {
return
}
delete(ws.sq, streamID)
ws.queuePool.put(q)
}
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
// no-op: priorities are ignored
}
func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
id := wr.StreamID()
if id == 0 {
ws.zero.push(wr)
return
}
q, ok := ws.sq[id]
if !ok {
q = ws.queuePool.get()
ws.sq[id] = q
}
q.push(wr)
}
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
// Control frames first.
if !ws.zero.empty() {
return ws.zero.shift(), true
}
// Iterate over all non-idle streams until finding one that can be consumed.
for _, q := range ws.sq {
if wr, ok := q.consume(math.MaxInt32); ok {
return wr, true
}
}
return FrameWriteRequest{}, false
}
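// The arbitrariness comes from Go itself: map iteration order over ws.sq is
// unspecified, so repeated Pops may visit streams in different orders. An
// illustrative opt-in sketch (assumes the exported http2.Server API; srv is
// a hypothetical *http.Server):
//
//	http2.ConfigureServer(srv, &http2.Server{
//		NewWriteScheduler: http2.NewRandomWriteScheduler,
//	})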

668
vendor/golang.org/x/net/idna/idna.go generated vendored Normal file

@ -0,0 +1,668 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna // import "golang.org/x/net/idna"
import (
"fmt"
"strings"
"unicode/utf8"
"golang.org/x/text/secure/bidirule"
"golang.org/x/text/unicode/norm"
)
// NOTE: Unlike common practice in Go APIs, the functions will return a
// sanitized domain name in case of errors. Browsers sometimes use a partially
// evaluated string as lookup.
// TODO: the current error handling is, in my opinion, the least opinionated.
// Other strategies are also viable, though:
// Option 1) Return an empty string in case of error, but allow the user to
// specify explicitly which errors to ignore.
// Option 2) Return the partially evaluated string if it is itself a valid
// string, otherwise return the empty string in case of error.
// Option 3) Option 1 and 2.
// Option 4) Always return an empty string for now and implement Option 1 as
// needed, and document that the return string may not be empty in case of
// error in the future.
// I think Option 1 is best, but it is quite opinionated.
// ToASCII is a wrapper for Punycode.ToASCII.
func ToASCII(s string) (string, error) {
return Punycode.process(s, true)
}
// ToUnicode is a wrapper for Punycode.ToUnicode.
func ToUnicode(s string) (string, error) {
return Punycode.process(s, false)
}
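// Illustrative example: both wrappers run the permissive Punycode profile, so
// they encode and decode without mapping or validation.
//
//	a, _ := idna.ToASCII("bücher.example.com")          // "xn--bcher-kva.example.com"
//	u, _ := idna.ToUnicode("xn--bcher-kva.example.com") // "bücher.example.com"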
// An Option configures a Profile at creation time.
type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
// compatibility. It is used by most browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
return func(o *options) { o.transitional = transitional }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
// are longer than allowed by the RFC.
func VerifyDNSLength(verify bool) Option {
return func(o *options) { o.verifyDNSLength = verify }
}
// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
func ValidateLabels(enable bool) Option {
return func(o *options) {
// Don't override existing mappings, but set one that at least checks
// normalization if it is not set.
if o.mapping == nil && enable {
o.mapping = normalize
}
o.trie = trie
o.validateLabels = enable
o.fromPuny = validateFromPunycode
}
}
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
//
// This option is useful, for instance, for browsers that allow characters
// outside this range, for example a '_' (U+005F LOW LINE). See
// http://www.rfc-editor.org/std/std3.txt for more details. This option
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
func StrictDomainName(use bool) Option {
return func(o *options) {
o.trie = trie
o.useSTD3Rules = use
o.fromPuny = validateFromPunycode
}
}
// NOTE: the following options pull in tables. The tables should not be linked
// in as long as the options are not used.
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
// that relies on proper validation of labels should include this rule.
func BidiRule() Option {
return func(o *options) { o.bidirule = bidirule.ValidString }
}
// ValidateForRegistration sets validation options to verify that a given IDN is
// properly formatted for registration as defined by Section 4 of RFC 5891.
func ValidateForRegistration() Option {
return func(o *options) {
o.mapping = validateRegistration
StrictDomainName(true)(o)
ValidateLabels(true)(o)
VerifyDNSLength(true)(o)
BidiRule()(o)
}
}
// MapForLookup sets validation and mapping options such that a given IDN is
// transformed for domain name lookup according to the requirements set out in
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
// to add this check.
//
// The mappings include normalization and mapping case, width and other
// compatibility mappings.
func MapForLookup() Option {
return func(o *options) {
o.mapping = validateAndMap
StrictDomainName(true)(o)
ValidateLabels(true)(o)
}
}
type options struct {
transitional bool
useSTD3Rules bool
validateLabels bool
verifyDNSLength bool
trie *idnaTrie
// fromPuny calls validation rules when converting A-labels to U-labels.
fromPuny func(p *Profile, s string) error
// mapping implements a validation and mapping step as defined in RFC 5895
// or UTS 46, tailored to, for example, domain registration or lookup.
mapping func(p *Profile, s string) (string, error)
// bidirule, if specified, checks whether s conforms to the Bidi Rule
// defined in RFC 5893.
bidirule func(s string) bool
}
// A Profile defines the configuration of an IDNA mapper.
type Profile struct {
options
}
func apply(o *options, opts []Option) {
for _, f := range opts {
f(o)
}
}
// New creates a new Profile.
//
// With no options, the returned Profile is the most permissive and equals the
// Punycode Profile. Options can be passed to further restrict the Profile. The
// MapForLookup and ValidateForRegistration options set a collection of options,
// for lookup and registration purposes respectively, which can be tailored by
// adding more fine-grained options, where later options override earlier
// options.
func New(o ...Option) *Profile {
p := &Profile{}
apply(&p.options, o)
return p
}
// ToASCII converts a domain or domain label to its ASCII form. For example,
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// ToASCII("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToASCII(s string) (string, error) {
return p.process(s, true)
}
// ToUnicode converts a domain or domain label to its Unicode form. For example,
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
// ToUnicode("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToUnicode(s string) (string, error) {
pp := *p
pp.transitional = false
return pp.process(s, false)
}
// String reports a string with a description of the profile for debugging
// purposes. The string format may change with different versions.
func (p *Profile) String() string {
s := ""
if p.transitional {
s = "Transitional"
} else {
s = "NonTransitional"
}
if p.useSTD3Rules {
s += ":UseSTD3Rules"
}
if p.validateLabels {
s += ":ValidateLabels"
}
if p.verifyDNSLength {
s += ":VerifyDNSLength"
}
return s
}
var (
// Punycode is a Profile that does raw punycode processing with a minimum
// of validation.
Punycode *Profile = punycode
// Lookup is the recommended profile for looking up domain names, according
// to Section 5 of RFC 5891. The exact configuration of this profile may
// change over time.
Lookup *Profile = lookup
// Display is the recommended profile for displaying domain names.
// The configuration of this profile may change over time.
Display *Profile = display
// Registration is the recommended profile for checking whether a given
// IDN is valid for registration, according to Section 4 of RFC 5891.
Registration *Profile = registration
punycode = &Profile{}
lookup = &Profile{options{
transitional: true,
useSTD3Rules: true,
validateLabels: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
display = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
registration = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
verifyDNSLength: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateRegistration,
bidirule: bidirule.ValidString,
}}
// TODO: profiles
// Register: recommended for approving domain names: don't do any mappings
// but rather reject on invalid input. Bundle or block deviation characters.
)
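// Illustrative example: the Lookup profile case-maps before encoding, so a
// mixed-case U-label still yields the canonical A-label.
//
//	a, err := idna.Lookup.ToASCII("BÜCHER.example.com")
//	// a == "xn--bcher-kva.example.com", err == nil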
type labelError struct{ label, code_ string }
func (e labelError) code() string { return e.code_ }
func (e labelError) Error() string {
return fmt.Sprintf("idna: invalid label %q", e.label)
}
type runeError rune
func (e runeError) code() string { return "P1" }
func (e runeError) Error() string {
return fmt.Sprintf("idna: disallowed rune %U", e)
}
// process implements the algorithm described in section 4 of UTS #46,
// see http://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
var err error
if p.mapping != nil {
s, err = p.mapping(p, s)
}
// Remove leading empty labels.
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
}
// It seems like we should only create this error on ToASCII, but the
// UTS 46 conformance tests suggest we should always check this.
if err == nil && p.verifyDNSLength && s == "" {
err = &labelError{s, "A4"}
}
labels := labelIter{orig: s}
for ; !labels.done(); labels.next() {
label := labels.label()
if label == "" {
// Empty labels are not okay. The label iterator skips the last
// label if it is empty.
if err == nil && p.verifyDNSLength {
err = &labelError{s, "A4"}
}
continue
}
if strings.HasPrefix(label, acePrefix) {
u, err2 := decode(label[len(acePrefix):])
if err2 != nil {
if err == nil {
err = err2
}
// Spec says keep the old label.
continue
}
labels.set(u)
if err == nil && p.validateLabels {
err = p.fromPuny(p, u)
}
if err == nil {
// This should be called on NonTransitional, according to the
// spec, but that currently does not have any effect. Use the
// original profile to preserve options.
err = p.validateLabel(u)
}
} else if err == nil {
err = p.validateLabel(label)
}
}
if toASCII {
for labels.reset(); !labels.done(); labels.next() {
label := labels.label()
if !ascii(label) {
a, err2 := encode(acePrefix, label)
if err == nil {
err = err2
}
label = a
labels.set(a)
}
n := len(label)
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
err = &labelError{label, "A4"}
}
}
}
s = labels.result()
if toASCII && p.verifyDNSLength && err == nil {
// Compute the length of the domain name minus the root label and its dot.
n := len(s)
if n > 0 && s[n-1] == '.' {
n--
}
if len(s) < 1 || n > 253 {
err = &labelError{s, "A4"}
}
}
return s, err
}
func normalize(p *Profile, s string) (string, error) {
return norm.NFC.String(s), nil
}
func validateRegistration(p *Profile, s string) (string, error) {
if !norm.NFC.IsNormalString(s) {
return s, &labelError{s, "V1"}
}
var err error
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
i += sz
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
// TODO: handle the NV8 defined in the Unicode idna data set to allow
// for strict conformance to IDNA2008.
case valid, deviation:
case disallowed, mapped, unknown, ignored:
if err == nil {
r, _ := utf8.DecodeRuneInString(s[i:])
err = runeError(r)
}
}
}
return s, err
}
func validateAndMap(p *Profile, s string) (string, error) {
var (
err error
b []byte
k int
)
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
start := i
i += sz
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
case valid:
continue
case disallowed:
if err == nil {
r, _ := utf8.DecodeRuneInString(s[i:])
err = runeError(r)
}
continue
case mapped, deviation:
b = append(b, s[k:start]...)
b = info(v).appendMapping(b, s[start:i])
case ignored:
b = append(b, s[k:start]...)
// drop the rune
case unknown:
b = append(b, s[k:start]...)
b = append(b, "\ufffd"...)
}
k = i
}
if k == 0 {
// No changes so far.
s = norm.NFC.String(s)
} else {
b = append(b, s[k:]...)
if norm.NFC.QuickSpan(b) != len(b) {
b = norm.NFC.Bytes(b)
}
// TODO: the punycode converters require strings as input.
s = string(b)
}
return s, err
}
// A labelIter allows iterating over domain name labels.
type labelIter struct {
orig string
slice []string
curStart int
curEnd int
i int
}
func (l *labelIter) reset() {
l.curStart = 0
l.curEnd = 0
l.i = 0
}
func (l *labelIter) done() bool {
return l.curStart >= len(l.orig)
}
func (l *labelIter) result() string {
if l.slice != nil {
return strings.Join(l.slice, ".")
}
return l.orig
}
func (l *labelIter) label() string {
if l.slice != nil {
return l.slice[l.i]
}
p := strings.IndexByte(l.orig[l.curStart:], '.')
l.curEnd = l.curStart + p
if p == -1 {
l.curEnd = len(l.orig)
}
return l.orig[l.curStart:l.curEnd]
}
// next sets the value to the next label. It skips the last label if it is empty.
func (l *labelIter) next() {
l.i++
if l.slice != nil {
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
l.curStart = len(l.orig)
}
} else {
l.curStart = l.curEnd + 1
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
l.curStart = len(l.orig)
}
}
}
func (l *labelIter) set(s string) {
if l.slice == nil {
l.slice = strings.Split(l.orig, ".")
}
l.slice[l.i] = s
}
// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"
func (p *Profile) simplify(cat category) category {
switch cat {
case disallowedSTD3Mapped:
if p.useSTD3Rules {
cat = disallowed
} else {
cat = mapped
}
case disallowedSTD3Valid:
if p.useSTD3Rules {
cat = disallowed
} else {
cat = valid
}
case deviation:
if !p.transitional {
cat = valid
}
case validNV8, validXV8:
// TODO: handle V2008
cat = valid
}
return cat
}
func validateFromPunycode(p *Profile, s string) error {
if !norm.NFC.IsNormalString(s) {
return &labelError{s, "V1"}
}
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
if c := p.simplify(info(v).category()); c != valid && c != deviation {
return &labelError{s, "V6"}
}
i += sz
}
return nil
}
const (
zwnj = "\u200c"
zwj = "\u200d"
)
type joinState int8
const (
stateStart joinState = iota
stateVirama
stateBefore
stateBeforeVirama
stateAfter
stateFAIL
)
var joinStates = [][numJoinTypes]joinState{
stateStart: {
joiningL: stateBefore,
joiningD: stateBefore,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateVirama,
},
stateVirama: {
joiningL: stateBefore,
joiningD: stateBefore,
},
stateBefore: {
joiningL: stateBefore,
joiningD: stateBefore,
joiningT: stateBefore,
joinZWNJ: stateAfter,
joinZWJ: stateFAIL,
joinVirama: stateBeforeVirama,
},
stateBeforeVirama: {
joiningL: stateBefore,
joiningD: stateBefore,
joiningT: stateBefore,
},
stateAfter: {
joiningL: stateFAIL,
joiningD: stateBefore,
joiningT: stateAfter,
joiningR: stateStart,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateAfter, // no-op as we can't accept joiners here
},
stateFAIL: {
0: stateFAIL,
joiningL: stateFAIL,
joiningD: stateFAIL,
joiningT: stateFAIL,
joiningR: stateFAIL,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateFAIL,
},
}
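// Worked example: for the hypothetical label "a\u200cb", the walk in
// validateLabel below goes stateStart -> stateStart ('a' has no joining
// type) -> stateFAIL on the ZWNJ (no preceding virama or joining context),
// so the label is rejected with the context error "C".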
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
func (p *Profile) validateLabel(s string) error {
if s == "" {
if p.verifyDNSLength {
return &labelError{s, "A4"}
}
return nil
}
if p.bidirule != nil && !p.bidirule(s) {
return &labelError{s, "B"}
}
if !p.validateLabels {
return nil
}
trie := p.trie // p.validateLabels is only set if trie is set.
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
return &labelError{s, "V2"}
}
if s[0] == '-' || s[len(s)-1] == '-' {
return &labelError{s, "V3"}
}
// TODO: merge the use of this in the trie.
v, sz := trie.lookupString(s)
x := info(v)
if x.isModifier() {
return &labelError{s, "V5"}
}
// Quickly return in the absence of zero-width (non) joiners.
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
return nil
}
st := stateStart
for i := 0; ; {
jt := x.joinType()
if s[i:i+sz] == zwj {
jt = joinZWJ
} else if s[i:i+sz] == zwnj {
jt = joinZWNJ
}
st = joinStates[st][jt]
if x.isViramaModifier() {
st = joinStates[st][joinVirama]
}
if i += sz; i == len(s) {
break
}
v, sz = trie.lookupString(s[i:])
x = info(v)
}
if st == stateFAIL || st == stateAfter {
return &labelError{s, "C"}
}
return nil
}
func ascii(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] >= utf8.RuneSelf {
return false
}
}
return true
}

203
vendor/golang.org/x/net/idna/punycode.go generated vendored Normal file

@ -0,0 +1,203 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package idna
// This file implements the Punycode algorithm from RFC 3492.
import (
"math"
"strings"
"unicode/utf8"
)
// These parameter values are specified in section 5.
//
// All computation is done with int32s, so that overflow behavior is identical
// regardless of whether int is 32-bit or 64-bit.
const (
base int32 = 36
damp int32 = 700
initialBias int32 = 72
initialN int32 = 128
skew int32 = 38
tmax int32 = 26
tmin int32 = 1
)
func punyError(s string) error { return &labelError{s, "A3"} }
// decode decodes a string as specified in section 6.2.
func decode(encoded string) (string, error) {
if encoded == "" {
return "", nil
}
pos := 1 + strings.LastIndex(encoded, "-")
if pos == 1 {
return "", punyError(encoded)
}
if pos == len(encoded) {
return encoded[:len(encoded)-1], nil
}
output := make([]rune, 0, len(encoded))
if pos != 0 {
for _, r := range encoded[:pos-1] {
output = append(output, r)
}
}
i, n, bias := int32(0), initialN, initialBias
for pos < len(encoded) {
oldI, w := i, int32(1)
for k := base; ; k += base {
if pos == len(encoded) {
return "", punyError(encoded)
}
digit, ok := decodeDigit(encoded[pos])
if !ok {
return "", punyError(encoded)
}
pos++
i += digit * w
if i < 0 {
return "", punyError(encoded)
}
t := k - bias
if t < tmin {
t = tmin
} else if t > tmax {
t = tmax
}
if digit < t {
break
}
w *= base - t
if w >= math.MaxInt32/base {
return "", punyError(encoded)
}
}
x := int32(len(output) + 1)
bias = adapt(i-oldI, x, oldI == 0)
n += i / x
i %= x
if n > utf8.MaxRune || len(output) >= 1024 {
return "", punyError(encoded)
}
output = append(output, 0)
copy(output[i+1:], output[i:])
output[i] = n
i++
}
return string(output), nil
}
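// Worked example: decode("bcher-kva") copies the basic code points "bcher";
// the digits k, v, a then decode to the delta 10*1 + 21*35 = 745, which with
// x = 6 splits into n += 745/6 (reaching U+00FC, 'ü') and insertion index
// 745%6 = 1, yielding "bücher".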
// encode encodes a string as specified in section 6.3 and prepends prefix to
// the result.
//
// The "while h < length(input)" line in the specification becomes "for
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
func encode(prefix, s string) (string, error) {
output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
copy(output, prefix)
delta, n, bias := int32(0), initialN, initialBias
b, remaining := int32(0), int32(0)
for _, r := range s {
if r < 0x80 {
b++
output = append(output, byte(r))
} else {
remaining++
}
}
h := b
if b > 0 {
output = append(output, '-')
}
for remaining != 0 {
m := int32(0x7fffffff)
for _, r := range s {
if m > r && r >= n {
m = r
}
}
delta += (m - n) * (h + 1)
if delta < 0 {
return "", punyError(s)
}
n = m
for _, r := range s {
if r < n {
delta++
if delta < 0 {
return "", punyError(s)
}
continue
}
if r > n {
continue
}
q := delta
for k := base; ; k += base {
t := k - bias
if t < tmin {
t = tmin
} else if t > tmax {
t = tmax
}
if q < t {
break
}
output = append(output, encodeDigit(t+(q-t)%(base-t)))
q = (q - t) / (base - t)
}
output = append(output, encodeDigit(q))
bias = adapt(delta, h+1, h == b)
delta = 0
h++
remaining--
}
delta++
n++
}
return string(output), nil
}
func decodeDigit(x byte) (digit int32, ok bool) {
switch {
case '0' <= x && x <= '9':
return int32(x - ('0' - 26)), true
case 'A' <= x && x <= 'Z':
return int32(x - 'A'), true
case 'a' <= x && x <= 'z':
return int32(x - 'a'), true
}
return 0, false
}
func encodeDigit(digit int32) byte {
switch {
case 0 <= digit && digit < 26:
return byte(digit + 'a')
case 26 <= digit && digit < 36:
return byte(digit + ('0' - 26))
}
panic("idna: internal error in punycode encoding")
}
// adapt is the bias adaptation function specified in section 6.1.
func adapt(delta, numPoints int32, firstTime bool) int32 {
if firstTime {
delta /= damp
} else {
delta /= 2
}
delta += delta / numPoints
k := int32(0)
for delta > ((base-tmin)*tmax)/2 {
delta /= base - tmin
k += base
}
return k + (base-tmin+1)*delta/(delta+skew)
}

4477
vendor/golang.org/x/net/idna/tables.go generated vendored Normal file

File diff suppressed because it is too large

72
vendor/golang.org/x/net/idna/trie.go generated vendored Normal file

@ -0,0 +1,72 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}
// Sparse block handling code.
type valueRange struct {
value uint16 // header: value:stride
lo, hi byte // header: lo:n
}
type sparseBlocks struct {
values []valueRange
offset []uint16
}
var idnaSparse = sparseBlocks{
values: idnaSparseValues[:],
offset: idnaSparseOffset[:],
}
// Don't use newIdnaTrie to avoid unconditional linking in of the table.
var trie = &idnaTrie{}
// lookup determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is r.value + (b - r.lo)*stride, with the stride taken
// from the block header.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
offset := t.offset[n]
header := t.values[offset]
lo := offset + 1
hi := lo + uint16(header.lo)
for lo < hi {
m := lo + (hi-lo)/2
r := t.values[m]
if r.lo <= b && b <= r.hi {
return r.value + uint16(b-r.lo)*header.value
}
if b < r.lo {
hi = m
} else {
lo = m + 1
}
}
return 0
}

114
vendor/golang.org/x/net/idna/trieval.go generated vendored Normal file

@ -0,0 +1,114 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package idna
// This file contains definitions for interpreting the trie value of the idna
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.
// info holds information from the IDNA mapping table for a single rune. It is
// the value returned by a trie lookup. In most cases, all information fits in
// a 16-bit value. For mappings, this value may contain an index into a slice
// with the mapped string. Such mappings can consist of the actual mapped value
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
// input rune. This technique is used by the cases packages and reduces the
// table size significantly.
//
// The per-rune values have the following format:
//
// if mapped {
// if inlinedXOR {
// 15..13 inline XOR marker
// 12..11 unused
// 10..3 inline XOR mask
// } else {
// 15..3 index into xor or mapping table
// }
// } else {
// 15..13 unused
// 12 modifier (including virama)
// 11 virama modifier
// 10..8 joining type
// 7..3 category type
// }
// 2 use xor pattern
// 1..0 mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16
const (
catSmallMask = 0x3
catBigMask = 0xF8
indexShift = 3
xorBit = 0x4 // interpret the index as an xor pattern
inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
joinShift = 8
joinMask = 0x07
viramaModifier = 0x0800
modifier = 0x1000
)
// A category corresponds to a category defined in the IDNA mapping table.
type category uint16
const (
unknown category = 0 // not currently defined in Unicode.
mapped category = 1
disallowedSTD3Mapped category = 2
deviation category = 3
)
const (
valid category = 0x08
validNV8 category = 0x18
validXV8 category = 0x28
disallowed category = 0x40
disallowedSTD3Valid category = 0x80
ignored category = 0xC0
)
// join types and additional rune information
const (
joiningL = (iota + 1)
joiningD
joiningT
joiningR
// The following types are derived during processing.
joinZWJ
joinZWNJ
joinVirama
numJoinTypes
)
func (c info) isMapped() bool {
return c&0x3 != 0
}
func (c info) category() category {
small := c & catSmallMask
if small != 0 {
return category(small)
}
return category(c & catBigMask)
}
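// e.g. info(0x08).category() == valid (the low two bits are clear, so the
// big mask applies), while info(0x01).category() == mapped (a non-zero
// small mask wins).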
func (c info) joinType() info {
if c.isMapped() {
return 0
}
return (c >> joinShift) & joinMask
}
func (c info) isModifier() bool {
return c&(modifier|catSmallMask) == modifier
}
func (c info) isViramaModifier() bool {
return c&(viramaModifier|catSmallMask) == viramaModifier
}

vendor/golang.org/x/net/internal/timeseries/timeseries.go generated vendored

@ -371,7 +371,7 @@ func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable
 		}
 	}

 	// Failed to find a level that covers the desired range. So just
 	// extract from the last level, even if it doesn't cover the entire
 	// desired range.
 	ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)

vendor/golang.org/x/net/lex/httplex/httplex.go generated vendored

@ -10,8 +10,11 @@
 package httplex

 import (
+	"net"
 	"strings"
 	"unicode/utf8"
+
+	"golang.org/x/net/idna"
 )

 var isTokenTable = [127]bool{
@ -310,3 +313,39 @@ func ValidHeaderFieldValue(v string) bool {
 	}
 	return true
 }
+
+func isASCII(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return false
+		}
+	}
+	return true
+}
+
+// PunycodeHostPort returns the IDNA Punycode version
+// of the provided "host" or "host:port" string.
+func PunycodeHostPort(v string) (string, error) {
+	if isASCII(v) {
+		return v, nil
+	}
+
+	host, port, err := net.SplitHostPort(v)
+	if err != nil {
+		// The input 'v' argument was just a "host" argument,
+		// without a port. This error should not be returned
+		// to the caller.
+		host = v
+		port = ""
+	}
+	host, err = idna.ToASCII(host)
+	if err != nil {
+		// Non-UTF-8? Not representable in Punycode, in any
+		// case.
+		return "", err
+	}
+	if port == "" {
+		return host, nil
+	}
+	return net.JoinHostPort(host, port), nil
+}

vendor/golang.org/x/net/trace/events.go generated vendored

@ -21,11 +21,6 @@ import (
 	"time"
 )

-var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
-	"elapsed":   elapsed,
-	"trimSpace": strings.TrimSpace,
-}).Parse(eventsHTML))
-
 const maxEventsPerLog = 100

 type bucket struct {
@ -101,7 +96,7 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
 	famMu.RLock()
 	defer famMu.RUnlock()

-	if err := eventsTmpl.Execute(w, data); err != nil {
+	if err := eventsTmpl().Execute(w, data); err != nil {
 		log.Printf("net/trace: Failed executing template: %v", err)
 	}
 }
@ -421,6 +416,19 @@ func freeEventLog(el *eventLog) {
 	}
 }

+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+	eventsTmplOnce.Do(func() {
+		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+			"elapsed":   elapsed,
+			"trimSpace": strings.TrimSpace,
+		}).Parse(eventsHTML))
+	})
+	return eventsTmplCache
+}
+
 const eventsHTML = `
 <html>
 	<head>

vendor/golang.org/x/net/trace/histogram.go generated vendored

@ -12,6 +12,7 @@ import (
 	"html/template"
 	"log"
 	"math"
+	"sync"

 	"golang.org/x/net/internal/timeseries"
 )
@ -320,15 +321,20 @@ func (h *histogram) newData() *data {
 func (h *histogram) html() template.HTML {
 	buf := new(bytes.Buffer)
-	if err := distTmpl.Execute(buf, h.newData()); err != nil {
+	if err := distTmpl().Execute(buf, h.newData()); err != nil {
 		buf.Reset()
 		log.Printf("net/trace: couldn't execute template: %v", err)
 	}
 	return template.HTML(buf.String())
 }

-// Input: data
-var distTmpl = template.Must(template.New("distTmpl").Parse(`
+var distTmplCache *template.Template
+var distTmplOnce sync.Once
+
+func distTmpl() *template.Template {
+	distTmplOnce.Do(func() {
+		// Input: data
+		distTmplCache = template.Must(template.New("distTmpl").Parse(`
 <table>
 <tr>
     <td style="padding:0.25em">Count: {{.Count}}</td>
@ -354,3 +360,6 @@ var distTmpl = template.Must(template.New("distTmpl").Parse(`
 {{end}}
 </table>
 `))
+	})
+	return distTmplCache
+}

vendor/golang.org/x/net/trace/trace.go generated vendored

@ -91,7 +91,7 @@ var DebugUseAfterFinish = false
 // It returns two bools; the first indicates whether the page may be viewed at all,
 // and the second indicates whether sensitive events will be shown.
 //
-// AuthRequest may be replaced by a program to customise its authorisation requirements.
+// AuthRequest may be replaced by a program to customize its authorization requirements.
 //
 // The default AuthRequest function returns (true, true) if and only if the request
 // comes from localhost/127.0.0.1/[::1].
@ -238,7 +238,7 @@ func Render(w io.Writer, req *http.Request, sensitive bool) {
 	completedMu.RLock()
 	defer completedMu.RUnlock()

-	if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
+	if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil {
 		log.Printf("net/trace: Failed executing template: %v", err)
 	}
 }
@ -333,7 +333,8 @@ func New(family, title string) Trace {
 	tr.ref()
 	tr.Family, tr.Title = family, title
 	tr.Start = time.Now()
-	tr.events = make([]event, 0, maxEventsPerTrace)
+	tr.maxEvents = maxEventsPerTrace
+	tr.events = tr.eventsBuf[:0]

 	activeMu.RLock()
 	s := activeTraces[tr.Family]
@ -650,8 +651,8 @@ type event struct {
 	Elapsed    time.Duration // since previous event in trace
 	NewDay     bool          // whether this event is on a different day to the previous event
 	Recyclable bool          // whether this event was passed via LazyLog
-	What       interface{}   // string or fmt.Stringer
 	Sensitive  bool          // whether this event contains sensitive information
+	What       interface{}   // string or fmt.Stringer
 }

 // WhenString returns a string representation of the elapsed time of the event.
@ -692,14 +693,17 @@ type trace struct {
 	IsError bool

 	// Append-only sequence of events (modulo discards).
 	mu        sync.RWMutex
 	events    []event
+	maxEvents int

 	refs     int32 // how many buckets this is in
 	recycler func(interface{})
 	disc     discarded // scratch space to avoid allocation

 	finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+
+	eventsBuf [4]event // preallocated buffer in case we only log a few events
 }

 func (tr *trace) reset() {
@ -711,11 +715,15 @@ func (tr *trace) reset() {
 	tr.traceID = 0
 	tr.spanID = 0
 	tr.IsError = false
+	tr.maxEvents = 0
 	tr.events = nil
 	tr.refs = 0
 	tr.recycler = nil
 	tr.disc = 0
 	tr.finishStack = nil
+	for i := range tr.eventsBuf {
+		tr.eventsBuf[i] = event{}
+	}
 }

 // delta returns the elapsed time since the last event or the trace start,
@ -744,7 +752,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
 		and very unlikely to be the fault of this code.

 		The most likely scenario is that some code elsewhere is using
-		a requestz.Trace after its Finish method is called.
+		a trace.Trace after its Finish method is called.
 		You can temporarily set the DebugUseAfterFinish var
 		to help discover where that is; do not leave that var set,
 		since it makes this package much less efficient.
@ -753,11 +761,11 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
 	e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
 	tr.mu.Lock()
 	e.Elapsed, e.NewDay = tr.delta(e.When)
-	if len(tr.events) < cap(tr.events) {
+	if len(tr.events) < tr.maxEvents {
 		tr.events = append(tr.events, e)
 	} else {
 		// Discard the middle events.
-		di := int((cap(tr.events) - 1) / 2)
+		di := int((tr.maxEvents - 1) / 2)
 		if d, ok := tr.events[di].What.(*discarded); ok {
 			(*d)++
 		} else {
@ -777,7 +785,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
 			go tr.recycler(tr.events[di+1].What)
 		}
 		copy(tr.events[di+1:], tr.events[di+2:])
-		tr.events[cap(tr.events)-1] = e
+		tr.events[tr.maxEvents-1] = e
 	}
 	tr.mu.Unlock()
 }
@ -803,7 +811,7 @@ func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
 func (tr *trace) SetMaxEvents(m int) {
 	// Always keep at least three events: first, discarded count, last.
 	if len(tr.events) == 0 && m > 3 {
-		tr.events = make([]event, 0, m)
+		tr.maxEvents = m
 	}
 }
@ -894,10 +902,18 @@ func elapsed(d time.Duration) string {
 	return string(b)
 }

-var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
-	"elapsed": elapsed,
-	"add":     func(a, b int) int { return a + b },
-}).Parse(pageHTML))
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+	pageTmplOnce.Do(func() {
+		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+			"elapsed": elapsed,
+			"add":     func(a, b int) int { return a + b },
+		}).Parse(pageHTML))
+	})
+	return pageTmplCache
+}

 const pageHTML = `
 {{template "Prolog" .}}
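The three trace changes above apply one pattern: replace an eagerly parsed package-level template with a sync.Once-guarded accessor, so the parse cost is paid only if a /debug page is actually rendered. A minimal standalone sketch of the pattern (names hypothetical; assumes html/template and sync are imported):

	var tmplCache *template.Template
	var tmplOnce sync.Once

	// tmpl parses the template on first use and returns the cached result
	// on every later call; sync.Once makes the initialization goroutine-safe.
	func tmpl() *template.Template {
		tmplOnce.Do(func() {
			tmplCache = template.Must(template.New("t").Parse(`Hello, {{.}}`))
		})
		return tmplCache
	}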

27
vendor/golang.org/x/text/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/text/PATENTS generated vendored Normal file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

13
vendor/golang.org/x/text/doc.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// text is a repository of text-related packages for internationalization
// (i18n) and localization (l10n), such as character encodings, text
// transformations, and locale-specific text handling.
package text
// TODO: more documentation on general concepts, such as Transformers, use
// of normalization, etc.

291
vendor/golang.org/x/text/gen.go generated vendored Normal file

@ -0,0 +1,291 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// gen runs go generate on Unicode- and CLDR-related packages in the text
// repository, taking into account dependencies and versions.
package main
import (
"bytes"
"flag"
"fmt"
"go/build"
"go/format"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"unicode"
"golang.org/x/text/internal/gen"
)
var (
verbose = flag.Bool("v", false, "verbose output")
force = flag.Bool("force", false, "ignore failing dependencies")
doCore = flag.Bool("core", false, "force an update to core")
excludeList = flag.String("exclude", "",
"comma-separated list of packages to exclude")
// The user can specify a selection of packages to build on the command line.
args []string
)
func exclude(pkg string) bool {
if len(args) > 0 {
return !contains(args, pkg)
}
return contains(strings.Split(*excludeList, ","), pkg)
}
// TODO:
// - Better version handling.
// - Generate tables for the core unicode package?
// - Add generation for encodings. This requires some retooling here and there.
// - Running repo-wide "long" tests.
var vprintf = fmt.Printf
func main() {
gen.Init()
args = flag.Args()
if !*verbose {
// Set vprintf to a no-op.
vprintf = func(string, ...interface{}) (int, error) { return 0, nil }
}
// TODO: create temporary cache directory to load files and create and set
// a "cache" option if the user did not specify the UNICODE_DIR environment
// variable. This will prevent duplicate downloads and also will enable long
// tests, which really need to be run after each generated package.
updateCore := *doCore
if gen.UnicodeVersion() != unicode.Version {
fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n",
gen.UnicodeVersion(),
unicode.Version)
// TODO: use collate to compare. Simple comparison will work, though,
// until Unicode reaches version 10. To avoid circular dependencies, we
// could use the NumericWeighter without using package collate using a
// trivial Weighter implementation.
if gen.UnicodeVersion() < unicode.Version && !*force {
os.Exit(2)
}
updateCore = true
}
var unicode = &dependency{}
if updateCore {
fmt.Printf("Updating core to version %s...\n", gen.UnicodeVersion())
unicode = generate("unicode")
// Test some users of the unicode packages, especially the ones that
// keep a mirrored table. These may need to be corrected by hand.
generate("regexp", unicode)
generate("strconv", unicode) // mimics Unicode table
generate("strings", unicode)
generate("testing", unicode) // mimics Unicode table
}
var (
cldr = generate("./unicode/cldr", unicode)
language = generate("./language", cldr)
internal = generate("./internal", unicode, language)
norm = generate("./unicode/norm", unicode)
rangetable = generate("./unicode/rangetable", unicode)
cases = generate("./cases", unicode, norm, language, rangetable)
width = generate("./width", unicode)
bidi = generate("./unicode/bidi", unicode, norm, rangetable)
mib = generate("./encoding/internal/identifier", unicode)
_ = generate("./encoding/htmlindex", unicode, language, mib)
_ = generate("./encoding/ianaindex", unicode, language, mib)
_ = generate("./secure/precis", unicode, norm, rangetable, cases, width, bidi)
_ = generate("./currency", unicode, cldr, language, internal)
_ = generate("./internal/number", unicode, cldr, language, internal)
_ = generate("./internal/export/idna", unicode, bidi, norm)
_ = generate("./language/display", unicode, cldr, language, internal)
_ = generate("./collate", unicode, norm, cldr, language, rangetable)
_ = generate("./search", unicode, norm, cldr, language, rangetable)
)
all.Wait()
// Copy exported packages to the destination golang.org repo.
copyExported("golang.org/x/net/idna")
if updateCore {
copyVendored()
}
if hasErrors {
fmt.Println("FAIL")
os.Exit(1)
}
vprintf("SUCCESS\n")
}
var (
all sync.WaitGroup
hasErrors bool
)
type dependency struct {
sync.WaitGroup
hasErrors bool
}
func generate(pkg string, deps ...*dependency) *dependency {
var wg dependency
if exclude(pkg) {
return &wg
}
wg.Add(1)
all.Add(1)
go func() {
defer wg.Done()
defer all.Done()
// Wait for dependencies to finish.
for _, d := range deps {
d.Wait()
if d.hasErrors && !*force {
fmt.Printf("--- ABORT: %s\n", pkg)
wg.hasErrors = true
return
}
}
vprintf("=== GENERATE %s\n", pkg)
args := []string{"generate"}
if *verbose {
args = append(args, "-v")
}
args = append(args, pkg)
cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
w := &bytes.Buffer{}
cmd.Stderr = w
cmd.Stdout = w
if err := cmd.Run(); err != nil {
fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err)
hasErrors = true
wg.hasErrors = true
return
}
vprintf("=== TEST %s\n", pkg)
args[0] = "test"
cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
wt := &bytes.Buffer{}
cmd.Stderr = wt
cmd.Stdout = wt
if err := cmd.Run(); err != nil {
fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err)
hasErrors = true
wg.hasErrors = true
return
}
vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w))
fmt.Print(wt.String())
}()
return &wg
}
// copyExported copies a package in x/text/internal/export to the
// destination repository.
func copyExported(p string) {
copyPackage(
filepath.Join("internal", "export", path.Base(p)),
filepath.Join("..", filepath.FromSlash(p[len("golang.org/x"):])),
"golang.org/x/text/internal/export/"+path.Base(p),
p)
}
// copyVendored copies packages used by Go core into the vendored directory.
func copyVendored() {
root := filepath.Join(build.Default.GOROOT, filepath.FromSlash("src/vendor/golang_org/x"))
err := filepath.Walk(root, func(dir string, info os.FileInfo, err error) error {
if err != nil || !info.IsDir() || root == dir {
return err
}
src := dir[len(root)+1:]
const slash = string(filepath.Separator)
if c := strings.Split(src, slash); c[0] == "text" {
// Copy a text repo package from its normal location.
src = strings.Join(c[1:], slash)
} else {
// Copy the vendored package if it exists in the export directory.
src = filepath.Join("internal", "export", filepath.Base(src))
}
copyPackage(src, dir, "golang.org", "golang_org")
return nil
})
if err != nil {
fmt.Printf("Seeding directory %s has failed %v:", root, err)
os.Exit(1)
}
}
// goGenRE is used to remove go:generate lines.
var goGenRE = regexp.MustCompile("//go:generate[^\n]*\n")
// copyPackage copies relevant files from a directory in x/text to the
// destination package directory. The destination package is assumed to have
// the same name. For each copied file, go:generate lines are removed and
// package comments are rewritten to the new path.
func copyPackage(dirSrc, dirDst, search, replace string) {
err := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error {
base := filepath.Base(file)
if err != nil || info.IsDir() ||
!strings.HasSuffix(base, ".go") ||
strings.HasSuffix(base, "_test.go") && !strings.HasPrefix(base, "example") ||
// Don't process subdirectories.
filepath.Dir(file) != dirSrc {
return nil
}
b, err := ioutil.ReadFile(file)
if err != nil || bytes.Contains(b, []byte("\n// +build ignore")) {
return err
}
// Fix paths.
b = bytes.Replace(b, []byte(search), []byte(replace), -1)
// Remove go:generate lines.
b = goGenRE.ReplaceAllLiteral(b, nil)
comment := "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n"
if *doCore {
comment = "// Code generated by running \"go run gen.go -core\" in golang.org/x/text. DO NOT EDIT.\n\n"
}
if !bytes.HasPrefix(b, []byte(comment)) {
b = append([]byte(comment), b...)
}
if b, err = format.Source(b); err != nil {
fmt.Println("Failed to format file:", err)
os.Exit(1)
}
file = filepath.Join(dirDst, base)
vprintf("=== COPY %s\n", file)
return ioutil.WriteFile(file, b, 0666)
})
if err != nil {
fmt.Println("Copying exported files failed:", err)
os.Exit(1)
}
}
func contains(a []string, s string) bool {
for _, e := range a {
if s == e {
return true
}
}
return false
}
func indent(b *bytes.Buffer) string {
return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1)
}

342
vendor/golang.org/x/text/secure/bidirule/bidirule.go generated vendored Normal file

@ -0,0 +1,342 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bidirule implements the Bidi Rule defined by RFC 5893.
//
// This package is under development. The API may change without notice and
// without preserving backward compatibility.
package bidirule
import (
"errors"
"unicode/utf8"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/bidi"
)
// This file contains an implementation of RFC 5893: Right-to-Left Scripts for
// Internationalized Domain Names for Applications (IDNA)
//
// A label is an individual component of a domain name. Labels are usually
// shown separated by dots; for example, the domain name "www.example.com" is
// composed of three labels: "www", "example", and "com".
//
// An RTL label is a label that contains at least one character of class R, AL,
// or AN. An LTR label is any label that is not an RTL label.
//
// A "Bidi domain name" is a domain name that contains at least one RTL label.
//
// The following guarantees can be made based on the above:
//
// o In a domain name consisting of only labels that satisfy the rule,
// the requirements of Section 3 are satisfied. Note that even LTR
// labels and pure ASCII labels have to be tested.
//
// o In a domain name consisting of only LDH labels (as defined in the
// Definitions document [RFC5890]) and labels that satisfy the rule,
// the requirements of Section 3 are satisfied as long as a label
// that starts with an ASCII digit does not come after a
// right-to-left label.
//
// No guarantee is given for other combinations.
// ErrInvalid indicates a label is invalid according to the Bidi Rule.
var ErrInvalid = errors.New("bidirule: failed Bidi Rule")
type ruleState uint8
const (
ruleInitial ruleState = iota
ruleLTR
ruleLTRFinal
ruleRTL
ruleRTLFinal
ruleInvalid
)
type ruleTransition struct {
next ruleState
mask uint16
}
var transitions = [...][2]ruleTransition{
// [2.1] The first character must be a character with Bidi property L, R, or
// AL. If it has the R or AL property, it is an RTL label; if it has the L
// property, it is an LTR label.
ruleInitial: {
{ruleLTRFinal, 1 << bidi.L},
{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL},
},
ruleRTL: {
// [2.3] In an RTL label, the end of the label must be a character with
// Bidi property R, AL, EN, or AN, followed by zero or more characters
// with Bidi property NSM.
{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN},
// [2.2] In an RTL label, only characters with the Bidi properties R,
// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
// We exclude the entries from [2.3]
{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
},
ruleRTLFinal: {
// [2.3] In an RTL label, the end of the label must be a character with
// Bidi property R, AL, EN, or AN, followed by zero or more characters
// with Bidi property NSM.
{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN | 1<<bidi.NSM},
// [2.2] In an RTL label, only characters with the Bidi properties R,
// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
// We exclude the entries from [2.3] and NSM.
{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
},
ruleLTR: {
// [2.6] In an LTR label, the end of the label must be a character with
// Bidi property L or EN, followed by zero or more characters with Bidi
// property NSM.
{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN},
// [2.5] In an LTR label, only characters with the Bidi properties L,
// EN, ES, CS, ET, ON, BN, or NSM are allowed.
// We exclude the entries from [2.6].
{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
},
ruleLTRFinal: {
// [2.6] In an LTR label, the end of the label must be a character with
// Bidi property L or EN, followed by zero or more characters with Bidi
// property NSM.
{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN | 1<<bidi.NSM},
// [2.5] In an LTR label, only characters with the Bidi properties L,
// EN, ES, CS, ET, ON, BN, or NSM are allowed.
// We exclude the entries from [2.6].
{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
},
ruleInvalid: {
{ruleInvalid, 0},
{ruleInvalid, 0},
},
}
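// Editor's note, a worked example of the table above (not part of the
// original source): validating "ab1" starts in ruleInitial; 'a' (class L)
// matches transitions[ruleInitial][0] and moves to ruleLTRFinal; 'b' (L) and
// '1' (EN) both match transitions[ruleLTRFinal][0], so the state stays
// ruleLTRFinal, which isFinal accepts.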
// [2.4] In an RTL label, if an EN is present, no AN may be present, and
// vice versa.
const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN)
// From RFC 5893
// An RTL label is a label that contains at least one character of type
// R, AL, or AN.
//
// An LTR label is any label that is not an RTL label.
// Direction reports the direction of the given label as defined by RFC 5893.
// The Bidi Rule does not have to be applied to labels of the category
// LeftToRight.
func Direction(b []byte) bidi.Direction {
for i := 0; i < len(b); {
e, sz := bidi.Lookup(b[i:])
if sz == 0 {
i++
}
c := e.Class()
if c == bidi.R || c == bidi.AL || c == bidi.AN {
return bidi.RightToLeft
}
i += sz
}
return bidi.LeftToRight
}
// DirectionString reports the direction of the given label as defined by RFC
// 5893. The Bidi Rule does not have to be applied to labels of the category
// LeftToRight.
func DirectionString(s string) bidi.Direction {
for i := 0; i < len(s); {
e, sz := bidi.LookupString(s[i:])
if sz == 0 {
i++
}
c := e.Class()
if c == bidi.R || c == bidi.AL || c == bidi.AN {
return bidi.RightToLeft
}
i += sz
}
return bidi.LeftToRight
}
// Valid reports whether b conforms to the BiDi rule.
func Valid(b []byte) bool {
var t Transformer
if n, ok := t.advance(b); !ok || n < len(b) {
return false
}
return t.isFinal()
}
// ValidString reports whether s conforms to the BiDi rule.
func ValidString(s string) bool {
var t Transformer
if n, ok := t.advanceString(s); !ok || n < len(s) {
return false
}
return t.isFinal()
}
// New returns a Transformer that verifies that input adheres to the Bidi Rule.
func New() *Transformer {
return &Transformer{}
}
// Transformer implements transform.Transform.
type Transformer struct {
state ruleState
hasRTL bool
seen uint16
}
// A rule can only be violated for "Bidi Domain names", meaning if one of the
// following categories has been observed.
func (t *Transformer) isRTL() bool {
const isRTL = 1<<bidi.R | 1<<bidi.AL | 1<<bidi.AN
return t.seen&isRTL != 0
}
func (t *Transformer) isFinal() bool {
if !t.isRTL() {
return true
}
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
}
// Reset implements transform.Transformer.
func (t *Transformer) Reset() { *t = Transformer{} }
// Transform implements transform.Transformer. This Transformer has state and
// needs to be reset between uses.
func (t *Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(dst) < len(src) {
src = src[:len(dst)]
atEOF = false
err = transform.ErrShortDst
}
n, err1 := t.Span(src, atEOF)
copy(dst, src[:n])
if err == nil || err1 != nil && err1 != transform.ErrShortSrc {
err = err1
}
return n, n, err
}
// Span returns the first n bytes of src that conform to the Bidi rule.
func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
if t.state == ruleInvalid && t.isRTL() {
return 0, ErrInvalid
}
n, ok := t.advance(src)
switch {
case !ok:
err = ErrInvalid
case n < len(src):
if !atEOF {
err = transform.ErrShortSrc
break
}
err = ErrInvalid
case !t.isFinal():
err = ErrInvalid
}
return n, err
}
// Precomputing the ASCII values decreases running time for the ASCII fast path
// by about 30%.
var asciiTable [128]bidi.Properties
func init() {
for i := range asciiTable {
p, _ := bidi.LookupRune(rune(i))
asciiTable[i] = p
}
}
func (t *Transformer) advance(s []byte) (n int, ok bool) {
var e bidi.Properties
var sz int
for n < len(s) {
if s[n] < utf8.RuneSelf {
e, sz = asciiTable[s[n]], 1
} else {
e, sz = bidi.Lookup(s[n:])
if sz <= 1 {
if sz == 1 {
// We always consider invalid UTF-8 to be invalid, even if
// the string has not yet been determined to be RTL.
// TODO: is this correct?
return n, false
}
return n, true // incomplete UTF-8 encoding
}
}
// TODO: using CompactClass would result in noticeable speedup.
// See unicode/bidi/prop.go:Properties.CompactClass.
c := uint16(1 << e.Class())
t.seen |= c
if t.seen&exclusiveRTL == exclusiveRTL {
t.state = ruleInvalid
return n, false
}
switch tr := transitions[t.state]; {
case tr[0].mask&c != 0:
t.state = tr[0].next
case tr[1].mask&c != 0:
t.state = tr[1].next
default:
t.state = ruleInvalid
if t.isRTL() {
return n, false
}
}
n += sz
}
return n, true
}
func (t *Transformer) advanceString(s string) (n int, ok bool) {
var e bidi.Properties
var sz int
for n < len(s) {
if s[n] < utf8.RuneSelf {
e, sz = asciiTable[s[n]], 1
} else {
e, sz = bidi.LookupString(s[n:])
if sz <= 1 {
if sz == 1 {
return n, false // invalid UTF-8
}
return n, true // incomplete UTF-8 encoding
}
}
// TODO: using CompactClass results in noticeable speedup.
// See unicode/bidi/prop.go:Properties.CompactClass.
c := uint16(1 << e.Class())
t.seen |= c
if t.seen&exclusiveRTL == exclusiveRTL {
t.state = ruleInvalid
return n, false
}
switch tr := transitions[t.state]; {
case tr[0].mask&c != 0:
t.state = tr[0].next
case tr[1].mask&c != 0:
t.state = tr[1].next
default:
t.state = ruleInvalid
if t.isRTL() {
return n, false
}
}
n += sz
}
return n, true
}
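// Editor's sketch (not part of the vendored file): minimal usage of the
// bidirule API defined above, assuming the vendored golang.org/x/text
// packages are importable.
package main

import (
	"fmt"

	"golang.org/x/text/secure/bidirule"
	"golang.org/x/text/unicode/bidi"
)

func main() {
	// A pure-ASCII label trivially satisfies the Bidi Rule.
	fmt.Println(bidirule.ValidString("example")) // true

	// An RTL label: Arabic letter alef followed by an Arabic-Indic digit (AN).
	fmt.Println(bidirule.ValidString("\u0627\u0661")) // true

	// Mixing European (EN) and Arabic (AN) digits in an RTL label violates [2.4].
	fmt.Println(bidirule.ValidString("\u0627" + "1" + "\u0661")) // false

	fmt.Println(bidirule.DirectionString("\u0627") == bidi.RightToLeft) // true
}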

705
vendor/golang.org/x/text/transform/transform.go generated vendored Normal file

@ -0,0 +1,705 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transform provides reader and writer wrappers that transform the
// bytes passing through as well as various transformations. Example
// transformations provided by other packages include normalization and
// conversion between character sets.
package transform // import "golang.org/x/text/transform"
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
var (
// ErrShortDst means that the destination buffer was too short to
// receive all of the transformed bytes.
ErrShortDst = errors.New("transform: short destination buffer")
// ErrShortSrc means that the source buffer has insufficient data to
// complete the transformation.
ErrShortSrc = errors.New("transform: short source buffer")
// ErrEndOfSpan means that the input and output (the transformed input)
// are not identical.
ErrEndOfSpan = errors.New("transform: input and output are not identical")
// errInconsistentByteCount means that Transform returned success (nil
// error) but also returned nSrc inconsistent with the src argument.
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
// errShortInternal means that an internal buffer is not large enough
// to make progress and the Transform operation must be aborted.
errShortInternal = errors.New("transform: short internal buffer")
)
// Transformer transforms bytes.
type Transformer interface {
// Transform writes to dst the transformed bytes read from src, and
// returns the number of dst bytes written and src bytes read. The
// atEOF argument tells whether src represents the last bytes of the
// input.
//
// Callers should always process the nDst bytes produced and account
// for the nSrc bytes consumed before considering the error err.
//
// A nil error means that all of the transformed bytes (whether freshly
// transformed from src or left over from previous Transform calls)
// were written to dst. A nil error can be returned regardless of
// whether atEOF is true. If err is nil then nSrc must equal len(src);
// the converse is not necessarily true.
//
// ErrShortDst means that dst was too short to receive all of the
// transformed bytes. ErrShortSrc means that src had insufficient data
// to complete the transformation. If both conditions apply, then
// either error may be returned. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
// Reset resets the state and allows a Transformer to be reused.
Reset()
}
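// Editor's note, an illustrative walkthrough of the contract above (not part
// of the original source): with a pass-through transformer, dst of length 3,
// and src "hello", Transform returns nDst=3, nSrc=3, err=ErrShortDst; the
// caller writes out dst[:3] and calls Transform again with src[3:] until err
// is nil.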
// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
Transformer
// Span returns a position in src such that transforming src[:n] results in
// identical output src[:n] for these bytes. It does not necessarily return
// the largest such n. The atEOF argument tells whether src represents the
// last bytes of the input.
//
// Callers should always account for the n bytes consumed before
// considering the error err.
//
// A nil error means that all input bytes are known to be identical to the
	// output produced by the Transformer. A nil error can be returned
	// regardless of whether atEOF is true. If err is nil, then n must
// equal len(src); the converse is not necessarily true.
//
// ErrEndOfSpan means that the Transformer output may differ from the
// input after n bytes. Note that n may be len(src), meaning that the output
// would contain additional bytes after otherwise identical output.
// ErrShortSrc means that src had insufficient data to determine whether the
// remaining bytes would change. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
//
// Calling Span can modify the Transformer state as a side effect. In
// effect, it does the transformation just as calling Transform would, only
// without copying to a destination buffer and only up to a point it can
// determine the input and output bytes are the same. This is obviously more
// limited than calling Transform, but can be more efficient in terms of
// copying and allocating buffers. Calls to Span and Transform may be
// interleaved.
Span(src []byte, atEOF bool) (n int, err error)
}
// NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method.
type NopResetter struct{}
// Reset implements the Reset method of the Transformer interface.
func (NopResetter) Reset() {}
// Reader wraps another io.Reader by transforming the bytes read.
type Reader struct {
r io.Reader
t Transformer
err error
// dst[dst0:dst1] contains bytes that have been transformed by t but
// not yet copied out via Read.
dst []byte
dst0, dst1 int
// src[src0:src1] contains bytes that have been read from r but not
// yet transformed through t.
src []byte
src0, src1 int
// transformComplete is whether the transformation is complete,
// regardless of whether or not it was successful.
transformComplete bool
}
const defaultBufSize = 4096
// NewReader returns a new Reader that wraps r by transforming the bytes read
// via t. It calls Reset on t.
func NewReader(r io.Reader, t Transformer) *Reader {
t.Reset()
return &Reader{
r: r,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Read implements the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
n, err := 0, error(nil)
for {
// Copy out any transformed bytes and return the final error if we are done.
if r.dst0 != r.dst1 {
n = copy(p, r.dst[r.dst0:r.dst1])
r.dst0 += n
if r.dst0 == r.dst1 && r.transformComplete {
return n, r.err
}
return n, nil
} else if r.transformComplete {
return 0, r.err
}
// Try to transform some source bytes, or to flush the transformer if we
// are out of source bytes. We do this even if r.r.Read returned an error.
// As the io.Reader documentation says, "process the n > 0 bytes returned
// before considering the error".
if r.src0 != r.src1 || r.err != nil {
r.dst0 = 0
r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
r.src0 += n
switch {
case err == nil:
if r.src0 != r.src1 {
r.err = errInconsistentByteCount
}
// The Transform call was successful; we are complete if we
// cannot read more bytes into src.
r.transformComplete = r.err != nil
continue
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
// Make room in dst by copying out, and try again.
continue
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
// Read more bytes into src via the code below, and try again.
default:
r.transformComplete = true
// The reader error (r.err) takes precedence over the
// transformer error (err) unless r.err is nil or io.EOF.
if r.err == nil || r.err == io.EOF {
r.err = err
}
continue
}
}
// Move any untransformed source bytes to the start of the buffer
// and read more bytes.
if r.src0 != 0 {
r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
}
n, r.err = r.r.Read(r.src[r.src1:])
r.src1 += n
}
}
// TODO: implement ReadByte (and ReadRune??).
// Writer wraps another io.Writer by transforming the bytes written.
// The user needs to call Close to flush unwritten bytes that may
// be buffered.
type Writer struct {
w io.Writer
t Transformer
dst []byte
// src[:n] contains bytes that have not yet passed through t.
src []byte
n int
}
// NewWriter returns a new Writer that wraps w by transforming the bytes written
// via t. It calls Reset on t.
func NewWriter(w io.Writer, t Transformer) *Writer {
t.Reset()
return &Writer{
w: w,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
func (w *Writer) Write(data []byte) (n int, err error) {
src := data
if w.n > 0 {
// Append bytes from data to the last remainder.
// TODO: limit the amount copied on first try.
n = copy(w.src[w.n:], data)
w.n += n
src = w.src[:w.n]
}
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, false)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return n, werr
}
src = src[nSrc:]
if w.n == 0 {
n += nSrc
} else if len(src) <= n {
// Enough bytes from w.src have been consumed. We make src point
// to data instead to reduce the copying.
w.n = 0
n -= len(src)
src = data[n:]
if n < len(data) && (err == nil || err == ErrShortSrc) {
continue
}
}
switch err {
case ErrShortDst:
// This error is okay as long as we are making progress.
if nDst > 0 || nSrc > 0 {
continue
}
case ErrShortSrc:
if len(src) < len(w.src) {
m := copy(w.src, src)
// If w.n > 0, bytes from data were already copied to w.src and n
// was already set to the number of bytes consumed.
if w.n == 0 {
n += m
}
w.n = m
err = nil
} else if nDst > 0 || nSrc > 0 {
// Not enough buffer to store the remainder. Keep processing as
// long as there is progress. Without this case, transforms that
// require a lookahead larger than the buffer may result in an
// error. This is not something one may expect to be common in
// practice, but it may occur when buffers are set to small
// sizes during testing.
continue
}
case nil:
if w.n > 0 {
err = errInconsistentByteCount
}
}
return n, err
}
}
// Close implements the io.Closer interface.
func (w *Writer) Close() error {
src := w.src[:w.n]
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, true)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return werr
}
if err != ErrShortDst {
return err
}
src = src[nSrc:]
}
}
type nop struct{ NopResetter }
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := copy(dst, src)
if n < len(src) {
err = ErrShortDst
}
return n, n, err
}
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
return len(src), nil
}
type discard struct{ NopResetter }
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return 0, len(src), nil
}
var (
// Discard is a Transformer for which all Transform calls succeed
// by consuming all bytes and writing nothing.
Discard Transformer = discard{}
// Nop is a SpanningTransformer that copies src to dst.
Nop SpanningTransformer = nop{}
)
// chain is a sequence of links. A chain with N Transformers has N+1 links and
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
// buffers owned by the chain. The i'th link transforms bytes from the i'th
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
type chain struct {
link []link
err error
	// errStart is the index at which the error occurred plus 1. Processing
	// resumes at this level at the next call to Transform. As long as
// errStart > 0, chain will not consume any more source bytes.
errStart int
}
func (c *chain) fatalError(errIndex int, err error) {
if i := errIndex + 1; i > c.errStart {
c.errStart = i
c.err = err
}
}
type link struct {
t Transformer
// b[p:n] holds the bytes to be transformed by t.
b []byte
p int
n int
}
func (l *link) src() []byte {
return l.b[l.p:l.n]
}
func (l *link) dst() []byte {
return l.b[l.n:]
}
// Chain returns a Transformer that applies t in sequence.
func Chain(t ...Transformer) Transformer {
if len(t) == 0 {
return nop{}
}
c := &chain{link: make([]link, len(t)+1)}
for i, tt := range t {
c.link[i].t = tt
}
// Allocate intermediate buffers.
b := make([][defaultBufSize]byte, len(t)-1)
for i := range b {
c.link[i+1].b = b[i][:]
}
return c
}
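// Editor's note (illustrative, not part of the original source): Chain(t1, t2)
// builds three links; link[0] holds t1 and is backed by the caller's src,
// link[1] holds t2 and owns one intermediate defaultBufSize buffer, and
// link[2] is backed by the caller's dst during Transform.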
// Reset resets the state of Chain. It calls Reset on all the Transformers.
func (c *chain) Reset() {
for i, l := range c.link {
if l.t != nil {
l.t.Reset()
}
c.link[i].p, c.link[i].n = 0, 0
}
}
// TODO: make chain use Span (is going to be fun to implement!)
// Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// Set up src and dst in the chain.
srcL := &c.link[0]
dstL := &c.link[len(c.link)-1]
srcL.b, srcL.p, srcL.n = src, 0, len(src)
dstL.b, dstL.n = dst, 0
var lastFull, needProgress bool // for detecting progress
// i is the index of the next Transformer to apply, for i in [low, high].
// low is the lowest index for which c.link[low] may still produce bytes.
// high is the highest index for which c.link[high] has a Transformer.
// The error returned by Transform determines whether to increase or
// decrease i. We try to completely fill a buffer before converting it.
for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
in, out := &c.link[i], &c.link[i+1]
nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
out.n += nDst
in.p += nSrc
if i > 0 && in.p == in.n {
in.p, in.n = 0, 0
}
needProgress, lastFull = lastFull, false
switch err0 {
case ErrShortDst:
// Process the destination buffer next. Return if we are already
// at the high index.
if i == high {
return dstL.n, srcL.p, ErrShortDst
}
if out.n != 0 {
i++
// If the Transformer at the next index is not able to process any
// source bytes there is nothing that can be done to make progress
// and the bytes will remain unprocessed. lastFull is used to
// detect this and break out of the loop with a fatal error.
lastFull = true
continue
}
// The destination buffer was too small, but is completely empty.
// Return a fatal error as this transformation can never complete.
c.fatalError(i, errShortInternal)
case ErrShortSrc:
if i == 0 {
// Save ErrShortSrc in err. All other errors take precedence.
err = ErrShortSrc
break
}
// Source bytes were depleted before filling up the destination buffer.
// Verify we made some progress, move the remaining bytes to the errStart
// and try to get more source bytes.
if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
// There were not enough source bytes to proceed while the source
// buffer cannot hold any more bytes. Return a fatal error as this
// transformation can never complete.
c.fatalError(i, errShortInternal)
break
}
// in.b is an internal buffer and we can make progress.
in.p, in.n = 0, copy(in.b, in.src())
fallthrough
case nil:
// if i == low, we have depleted the bytes at index i or any lower levels.
// In that case we increase low and i. In all other cases we decrease i to
// fetch more bytes before proceeding to the next index.
if i > low {
i--
continue
}
default:
c.fatalError(i, err0)
}
// Exhausted level low or fatal error: increase low and continue
// to process the bytes accepted so far.
i++
low = i
}
// If c.errStart > 0, this means we found a fatal error. We will clear
// all upstream buffers. At this point, no more progress can be made
// downstream, as Transform would have bailed while handling ErrShortDst.
if c.errStart > 0 {
for i := 1; i < c.errStart; i++ {
c.link[i].p, c.link[i].n = 0, 0
}
err, c.errStart, c.err = c.err, 0, nil
}
return dstL.n, srcL.p, err
}
// Deprecated: use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
return removeF(f)
}
type removeF func(r rune) bool
func (removeF) Reset() {}
// Transform implements the Transformer interface.
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {
if r = rune(src[0]); r < utf8.RuneSelf {
sz = 1
} else {
r, sz = utf8.DecodeRune(src)
if sz == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src) {
err = ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(r) {
if nDst+3 > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], "\uFFFD")
}
nSrc++
continue
}
}
if !t(r) {
if nDst+sz > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], src[:sz])
}
nSrc += sz
}
return
}
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
m := len(b)
if m <= 32 {
m = 64
} else if m <= 256 {
m *= 2
} else {
m += m >> 1
}
buf := make([]byte, m)
copy(buf, b[:n])
return buf
}
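// Editor's note (illustrative): grow roughly doubles small buffers and grows
// large ones by 50%; e.g. len(b) of 16 yields 64, 200 yields 400, and 1024
// yields 1536.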
const initialBufSize = 128
// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
t.Reset()
if s == "" {
		// Fast path for the common case for empty input. Results in about an
		// 86% reduction of running time for BenchmarkStringLowerEmpty.
if _, _, err := t.Transform(nil, nil, true); err == nil {
return "", 0, nil
}
}
// Allocate only once. Note that both dst and src escape when passed to
// Transform.
buf := [2 * initialBufSize]byte{}
dst := buf[:initialBufSize:initialBufSize]
src := buf[initialBufSize : 2*initialBufSize]
// The input string s is transformed in multiple chunks (starting with a
// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
// per-Transform-call) indexes, pDst and pSrc are overall indexes.
nDst, nSrc := 0, 0
pDst, pSrc := 0, 0
// pPrefix is the length of a common prefix: the first pPrefix bytes of the
// result will equal the first pPrefix bytes of s. It is not guaranteed to
// be the largest such value, but if pPrefix, len(result) and len(s) are
// all equal after the final transform (i.e. calling Transform with atEOF
// being true returned nil error) then we don't need to allocate a new
// result string.
pPrefix := 0
for {
// Invariant: pDst == pPrefix && pSrc == pPrefix.
n := copy(src, s[pSrc:])
nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc
// TODO: let transformers implement an optional Spanner interface, akin
// to norm's QuickSpan. This would even allow us to avoid any allocation.
if !bytes.Equal(dst[:nDst], src[:nSrc]) {
break
}
pPrefix = pSrc
if err == ErrShortDst {
// A buffer can only be short if a transformer modifies its input.
break
} else if err == ErrShortSrc {
if nSrc == 0 {
// No progress was made.
break
}
// Equal so far and !atEOF, so continue checking.
} else if err != nil || pPrefix == len(s) {
return string(s[:pPrefix]), pPrefix, err
}
}
// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
// We have transformed the first pSrc bytes of the input s to become pDst
// transformed bytes. Those transformed bytes are discontiguous: the first
// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
// that they become one contiguous slice: dst[:pDst].
if pPrefix != 0 {
newDst := dst
if pDst > len(newDst) {
newDst = make([]byte, len(s)+nDst-nSrc)
}
copy(newDst[pPrefix:pDst], dst[:nDst])
copy(newDst[:pPrefix], s[:pPrefix])
dst = newDst
}
// Prevent duplicate Transform calls with atEOF being true at the end of
// the input. Also return if we have an unrecoverable error.
if (err == nil && pSrc == len(s)) ||
(err != nil && err != ErrShortDst && err != ErrShortSrc) {
return string(dst[:pDst]), pSrc, err
}
// Transform the remaining input, growing dst and src buffers as necessary.
for {
n := copy(src, s[pSrc:])
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc
// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
// make progress. This may avoid excessive allocations.
if err == ErrShortDst {
if nDst == 0 {
dst = grow(dst, pDst)
}
} else if err == ErrShortSrc {
if nSrc == 0 {
src = grow(src, 0)
}
} else if err != nil || pSrc == len(s) {
return string(dst[:pDst]), pSrc, err
}
}
}
// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
return doAppend(t, 0, make([]byte, len(b)), b)
}
// Append appends the result of converting src[:n] using t to dst, where
// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
if len(dst) == cap(dst) {
n := len(src) + len(dst) // It is okay for this to be 0.
b := make([]byte, n)
dst = b[:copy(b, dst)]
}
return doAppend(t, len(dst), dst[:cap(dst)], src)
}
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
t.Reset()
pSrc := 0
for {
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
pDst += nDst
pSrc += nSrc
if err != ErrShortDst {
return dst[:pDst], pSrc, err
}
// Grow the destination buffer, but do not grow as long as we can make
// progress. This may avoid excessive allocations.
if nDst == 0 {
dst = grow(dst, pDst)
}
}
}
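// Editor's sketch (not part of the vendored file): the String and Reader
// entry points above exercised with the package's own Nop transformer,
// assuming the vendored golang.org/x/text/transform package is importable.
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"golang.org/x/text/transform"
)

func main() {
	s, n, err := transform.String(transform.Nop, "héllo")
	fmt.Println(s, n, err) // héllo 6 <nil>

	r := transform.NewReader(strings.NewReader("héllo"), transform.Nop)
	b, _ := ioutil.ReadAll(r)
	fmt.Println(string(b)) // héllo
}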

198
vendor/golang.org/x/text/unicode/bidi/bidi.go generated vendored Normal file

@ -0,0 +1,198 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go gen_trieval.go gen_ranges.go
// Package bidi contains functionality for bidirectional text support.
//
// See http://www.unicode.org/reports/tr9.
//
// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways
// and without notice.
package bidi // import "golang.org/x/text/unicode/bidi"
// TODO:
// The following functionality would not be hard to implement, but hinges on
// the definition of a Segmenter interface. For now this is up to the user.
// - Iterate over paragraphs
// - Segmenter to iterate over runs directly from a given text.
// Also:
// - Transformer for reordering?
// - Transformer (validator, really) for Bidi Rule.
// This API tries to avoid dealing with embedding levels for now. Under the hood
// these will be computed, but the question is to which extent the user should
// know they exist. We should at some point allow the user to specify an
// embedding hierarchy, though.
// A Direction indicates the overall flow of text.
type Direction int
const (
// LeftToRight indicates the text contains no right-to-left characters and
// that either there are some left-to-right characters or the option
// DefaultDirection(LeftToRight) was passed.
LeftToRight Direction = iota
// RightToLeft indicates the text contains no left-to-right characters and
// that either there are some right-to-left characters or the option
// DefaultDirection(RightToLeft) was passed.
RightToLeft
// Mixed indicates text contains both left-to-right and right-to-left
// characters.
Mixed
// Neutral means that text contains no left-to-right and right-to-left
// characters and that no default direction has been set.
Neutral
)
type options struct{}
// An Option is an option for Bidi processing.
type Option func(*options)
// ICU allows the user to define embedding levels. This may be used, for example,
// to use hierarchical structure of markup languages to define embeddings.
// The following option may be a way to expose this functionality in this API.
// // LevelFunc sets a function that associates nesting levels with the given text.
// // The levels function will be called with monotonically increasing values for p.
// func LevelFunc(levels func(p int) int) Option {
// panic("unimplemented")
// }
// DefaultDirection sets the default direction for a Paragraph. The direction is
// overridden if the text contains directional characters.
func DefaultDirection(d Direction) Option {
panic("unimplemented")
}
// A Paragraph holds a single Paragraph for Bidi processing.
type Paragraph struct {
// buffers
}
// SetBytes configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If b contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from b including this separator. Error may be non-nil if options are
// given.
func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) {
panic("unimplemented")
}
// SetString configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If s contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from s including this separator. Error may be non-nil if options are
// given.
func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) {
panic("unimplemented")
}
// IsLeftToRight reports whether the principal direction of rendering for this
// paragraph is left-to-right. If this returns false, the principal direction
// of rendering is right-to-left.
func (p *Paragraph) IsLeftToRight() bool {
panic("unimplemented")
}
// Direction returns the direction of the text of this paragraph.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
func (p *Paragraph) Direction() Direction {
panic("unimplemented")
}
// RunAt reports the Run at the given position of the input text.
//
// This method can be used for computing line breaks on paragraphs.
func (p *Paragraph) RunAt(pos int) Run {
panic("unimplemented")
}
// Order computes the visual ordering of all the runs in a Paragraph.
func (p *Paragraph) Order() (Ordering, error) {
panic("unimplemented")
}
// Line computes the visual ordering of runs for a single line starting and
// ending at the given positions in the original text.
func (p *Paragraph) Line(start, end int) (Ordering, error) {
panic("unimplemented")
}
// An Ordering holds the computed visual order of runs of a Paragraph. Calling
// SetBytes or SetString on the originating Paragraph invalidates an Ordering.
// The methods of an Ordering should only be called by one goroutine at a time.
type Ordering struct{}
// Direction reports the directionality of the runs.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
func (o *Ordering) Direction() Direction {
panic("unimplemented")
}
// NumRuns returns the number of runs.
func (o *Ordering) NumRuns() int {
panic("unimplemented")
}
// Run returns the ith run within the ordering.
func (o *Ordering) Run(i int) Run {
panic("unimplemented")
}
// TODO: perhaps with options.
// // Reorder creates a reader that reads the runes in visual order per character.
// // Modifiers remain after the runes they modify.
// func (l *Runs) Reorder() io.Reader {
// panic("unimplemented")
// }
// A Run is a continuous sequence of characters of a single direction.
type Run struct {
}
// String returns the text of the run in its original order.
func (r *Run) String() string {
panic("unimplemented")
}
// Bytes returns the text of the run in its original order.
func (r *Run) Bytes() []byte {
panic("unimplemented")
}
// TODO: methods for
// - Display order
// - headers and footers
// - bracket replacement.
// Direction reports the direction of the run.
func (r *Run) Direction() Direction {
panic("unimplemented")
}
// Position of the Run within the text passed to SetBytes or SetString of the
// originating Paragraph value.
func (r *Run) Pos() (start, end int) {
panic("unimplemented")
}
// AppendReverse reverses the order of characters of in, appends them to out,
// and returns the result. Modifiers will still follow the runes they modify.
// Brackets are replaced with their counterparts.
func AppendReverse(out, in []byte) []byte {
panic("unimplemented")
}
// ReverseString reverses the order of characters in s and returns a new string.
// Modifiers will still follow the runes they modify. Brackets are replaced with
// their counterparts.
func ReverseString(s string) string {
panic("unimplemented")
}

335
vendor/golang.org/x/text/unicode/bidi/bracket.go generated vendored Normal file

@ -0,0 +1,335 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bidi
import (
"container/list"
"fmt"
"sort"
)
// This file contains a port of the reference implementation of the
// Bidi Parentheses Algorithm:
// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java
//
// The implementation in this file covers definitions BD14-BD16 and rule N0
// of UAX#9.
//
// Some preprocessing is done for each rune before data is passed to this
// algorithm:
// - opening and closing brackets are identified
// - a bracket pair type, like '(' and ')' is assigned a unique identifier that
//   is identical for the opening and closing bracket. It is left to the
//   caller to do these mappings.
// - The BPA algorithm requires that bracket characters that are canonical
// equivalents of each other be able to be substituted for each other.
// It is the responsibility of the caller to do this canonicalization.
//
// In implementing BD16, this implementation departs slightly from the "logical"
// algorithm defined in UAX#9. In particular, the stack referenced there
// supports operations that go beyond a "basic" stack. An equivalent
// implementation based on a linked list is used here.
// Bidi_Paired_Bracket_Type
// BD14. An opening paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Open.
//
// BD15. A closing paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Close.
type bracketType byte
const (
bpNone bracketType = iota
bpOpen
bpClose
)
// bracketPair holds a pair of index values for opening and closing bracket
// location of a bracket pair.
type bracketPair struct {
opener int
closer int
}
func (b *bracketPair) String() string {
return fmt.Sprintf("(%v, %v)", b.opener, b.closer)
}
// bracketPairs is a slice of bracketPairs with a sort.Interface implementation.
type bracketPairs []bracketPair
func (b bracketPairs) Len() int { return len(b) }
func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener }
// resolvePairedBrackets runs the paired bracket part of the UBA algorithm.
//
// For each rune, it takes the index into the original string, the bracket
// type (in pairTypes), and the bracket identifier (pairValues). It also
// takes the direction type for the start-of-sentence and the embedding level.
//
// The identifiers for bracket types are the rune of the canonicalized opening
// bracket for brackets (open or close) or 0 for runes that are not brackets.
func resolvePairedBrackets(s *isolatingRunSequence) {
p := bracketPairer{
sos: s.sos,
openers: list.New(),
codesIsolatedRun: s.types,
indexes: s.indexes,
}
dirEmbed := L
if s.level&1 != 0 {
dirEmbed = R
}
p.locateBrackets(s.p.pairTypes, s.p.pairValues)
p.resolveBrackets(dirEmbed, s.p.initialTypes)
}
type bracketPairer struct {
sos Class // direction corresponding to start of sequence
// The following is a restatement of BD 16 using non-algorithmic language.
//
// A bracket pair is a pair of characters consisting of an opening
// paired bracket and a closing paired bracket such that the
// Bidi_Paired_Bracket property value of the former equals the latter,
// subject to the following constraints.
// - both characters of a pair occur in the same isolating run sequence
// - the closing character of a pair follows the opening character
// - any bracket character can belong at most to one pair, the earliest possible one
// - any bracket character not part of a pair is treated like an ordinary character
// - pairs may nest properly, but their spans may not overlap otherwise
// Bracket characters with canonical decompositions are supposed to be
// treated as if they had been normalized, to allow normalized and non-
// normalized text to give the same result. In this implementation that step
// is pushed out to the caller. The caller has to ensure that the pairValue
// slices contain the rune of the opening bracket after normalization for
// any opening or closing bracket.
openers *list.List // list of positions for opening brackets
// bracket pair positions sorted by location of opening bracket
pairPositions bracketPairs
codesIsolatedRun []Class // directional bidi codes for an isolated run
indexes []int // array of index values into the original string
}
// matchOpener reports whether characters at given positions form a matching
// bracket pair.
func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool {
return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
}
const maxPairingDepth = 63
// locateBrackets locates matching bracket pairs according to BD16.
//
// This implementation uses a linked list instead of a stack, because, while
// elements are added at the front (like a push) they are not generally removed
// in atomic 'pop' operations, reducing the benefit of the stack archetype.
func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) {
// traverse the run
// do that explicitly (not in a for-each) so we can record position
for i, index := range p.indexes {
// look at the bracket type for each character
if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON {
// continue scanning
continue
}
switch pairTypes[index] {
case bpOpen:
// check if maximum pairing depth reached
if p.openers.Len() == maxPairingDepth {
p.openers.Init()
return
}
// remember opener location, most recent first
p.openers.PushFront(i)
case bpClose:
// see if there is a match
count := 0
for elem := p.openers.Front(); elem != nil; elem = elem.Next() {
count++
opener := elem.Value.(int)
if p.matchOpener(pairValues, opener, i) {
// if the opener matches, add nested pair to the ordered list
p.pairPositions = append(p.pairPositions, bracketPair{opener, i})
// remove up to and including matched opener
for ; count > 0; count-- {
p.openers.Remove(p.openers.Front())
}
break
}
}
sort.Sort(p.pairPositions)
// if we get here, the closing bracket matched no openers
// and gets ignored
}
}
}
// Bracket pairs within an isolating run sequence are processed as units so
// that both the opening and the closing paired bracket in a pair resolve to
// the same direction.
//
// N0. Process bracket pairs in an isolating run sequence sequentially in
// the logical order of the text positions of the opening paired brackets
// using the logic given below. Within this scope, bidirectional types EN
// and AN are treated as R.
//
// Identify the bracket pairs in the current isolating run sequence
// according to BD16. For each bracket-pair element in the list of pairs of
// text positions:
//
// a Inspect the bidirectional types of the characters enclosed within the
// bracket pair.
//
// b If any strong type (either L or R) matching the embedding direction is
// found, set the type for both brackets in the pair to match the embedding
// direction.
//
// o [ e ] o -> o e e e o
//
// o [ o e ] -> o e o e e
//
// o [ NI e ] -> o e NI e e
//
// c Otherwise, if a strong type (opposite the embedding direction) is
// found, test for adjacent strong types as follows: 1 First, check
// backwards before the opening paired bracket until the first strong type
// (L, R, or sos) is found. If that first preceding strong type is opposite
// the embedding direction, then set the type for both brackets in the pair
// to that type. 2 Otherwise, set the type for both brackets in the pair to
// the embedding direction.
//
// o [ o ] e -> o o o o e
//
// o [ o NI ] o -> o o o NI o o
//
// e [ o ] o -> e e o e o
//
// e [ o ] e -> e e o e e
//
// e ( o [ o ] NI ) e -> e e o o o o NI e e
//
// d Otherwise, do not set the type for the current bracket pair. Note that
// if the enclosed text contains no strong types the paired brackets will
// both resolve to the same level when resolved individually using rules N1
// and N2.
//
// e ( NI ) o -> e ( NI ) o
// getStrongTypeN0 maps character's directional code to strong type as required
// by rule N0.
//
// TODO: have separate type for "strong" directionality.
func (p *bracketPairer) getStrongTypeN0(index int) Class {
switch p.codesIsolatedRun[index] {
// in the scope of N0, number types are treated as R
case EN, AN, AL, R:
return R
case L:
return L
default:
return ON
}
}
// classifyPairContent reports the strong types contained inside a Bracket Pair,
// assuming the given embedding direction.
//
// It returns ON if no strong type is found. If a single strong type is found,
// it returns this type. Otherwise it returns the embedding direction.
//
// TODO: use separate type for "strong" directionality.
func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
dirOpposite := ON
for i := loc.opener + 1; i < loc.closer; i++ {
dir := p.getStrongTypeN0(i)
if dir == ON {
continue
}
if dir == dirEmbed {
return dir // type matching embedding direction found
}
dirOpposite = dir
}
// return ON if no strong type found, or class opposite to dirEmbed
return dirOpposite
}
// classBeforePair determines which strong types are present before a Bracket
// Pair. Return R or L if strong type found, otherwise ON.
func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
for i := loc.opener - 1; i >= 0; i-- {
if dir := p.getStrongTypeN0(i); dir != ON {
return dir
}
}
// no strong types found, return sos
return p.sos
}
// assignBracketType implements rule N0 for a single bracket pair.
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
// rule "N0, a", inspect contents of pair
dirPair := p.classifyPairContent(loc, dirEmbed)
// dirPair is now L, R, or N (no strong type found)
// the following logical tests are performed out of order compared to
// the statement of the rules but yield the same results
if dirPair == ON {
return // case "d" - nothing to do
}
if dirPair != dirEmbed {
// case "c": strong type found, opposite - check before (c.1)
dirPair = p.classBeforePair(loc)
if dirPair == dirEmbed || dirPair == ON {
// no strong opposite type found before - use embedding (c.2)
dirPair = dirEmbed
}
}
// else: case "b", strong type found matching embedding,
// no explicit action needed, as dirPair is already set to embedding
// direction
// set the bracket types to the type found
p.setBracketsToType(loc, dirPair, initialTypes)
}
func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
p.codesIsolatedRun[loc.opener] = dirPair
p.codesIsolatedRun[loc.closer] = dirPair
for i := loc.opener + 1; i < loc.closer; i++ {
index := p.indexes[i]
if initialTypes[index] != NSM {
break
}
p.codesIsolatedRun[i] = dirPair
}
for i := loc.closer + 1; i < len(p.indexes); i++ {
index := p.indexes[i]
if initialTypes[index] != NSM {
break
}
p.codesIsolatedRun[i] = dirPair
}
}
// resolveBrackets implements rule N0 for a list of pairs.
func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) {
for _, loc := range p.pairPositions {
p.assignBracketType(loc, dirEmbed, initialTypes)
}
}

1058
vendor/golang.org/x/text/unicode/bidi/core.go generated vendored Normal file

File diff suppressed because it is too large

133
vendor/golang.org/x/text/unicode/bidi/gen.go generated vendored Normal file

@ -0,0 +1,133 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"flag"
"log"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
var outputFile = flag.String("out", "tables.go", "output file")
func main() {
gen.Init()
gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
genTables()
}
// bidiClass names and codes taken from class "bc" in
// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
var bidiClass = map[string]Class{
"AL": AL, // ArabicLetter
"AN": AN, // ArabicNumber
"B": B, // ParagraphSeparator
"BN": BN, // BoundaryNeutral
"CS": CS, // CommonSeparator
"EN": EN, // EuropeanNumber
"ES": ES, // EuropeanSeparator
"ET": ET, // EuropeanTerminator
"L": L, // LeftToRight
"NSM": NSM, // NonspacingMark
"ON": ON, // OtherNeutral
"R": R, // RightToLeft
"S": S, // SegmentSeparator
"WS": WS, // WhiteSpace
"FSI": Control,
"PDF": Control,
"PDI": Control,
"LRE": Control,
"LRI": Control,
"LRO": Control,
"RLE": Control,
"RLI": Control,
"RLO": Control,
}
func genTables() {
if numClass > 0x0F {
log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
}
w := gen.NewCodeWriter()
defer w.WriteGoFile(*outputFile, "bidi")
gen.WriteUnicodeVersion(w)
t := triegen.NewTrie("bidi")
// Build data about bracket mapping. These bits need to be or-ed with
// any other bits.
orMask := map[rune]uint64{}
xorMap := map[rune]int{}
xorMasks := []rune{0} // First value is no-op.
ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
r1 := p.Rune(0)
r2 := p.Rune(1)
xor := r1 ^ r2
if _, ok := xorMap[xor]; !ok {
xorMap[xor] = len(xorMasks)
xorMasks = append(xorMasks, xor)
}
entry := uint64(xorMap[xor]) << xorMaskShift
switch p.String(2) {
case "o":
entry |= openMask
case "c", "n":
default:
log.Fatalf("Unknown bracket class %q.", p.String(2))
}
orMask[r1] = entry
})
w.WriteComment(`
xorMasks contains masks to be xor-ed with brackets to get the reverse
version.`)
w.WriteVar("xorMasks", xorMasks)
done := map[rune]bool{}
insert := func(r rune, c Class) {
if !done[r] {
t.Insert(r, orMask[r]|uint64(c))
done[r] = true
}
}
// Insert the derived BiDi properties.
ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
r := p.Rune(0)
class, ok := bidiClass[p.String(1)]
if !ok {
log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
}
insert(r, class)
})
visitDefaults(insert)
// TODO: use sparse blocks. This would reduce table size considerably
// from the looks of it.
sz, err := t.Gen(w)
if err != nil {
log.Fatal(err)
}
w.Size += sz
}
// dummy values to make methods in gen_common compile. The real versions
// will be generated by this file into tables.go.
var (
xorMasks []rune
)

57
vendor/golang.org/x/text/unicode/bidi/gen_ranges.go generated vendored Normal file

@ -0,0 +1,57 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/rangetable"
)
// These tables are hand-extracted from:
// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
func visitDefaults(fn func(r rune, c Class)) {
// first write default values for ranges listed above.
visitRunes(fn, AL, []rune{
0x0600, 0x07BF, // Arabic
0x08A0, 0x08FF, // Arabic Extended-A
0xFB50, 0xFDCF, // Arabic Presentation Forms
0xFDF0, 0xFDFF,
0xFE70, 0xFEFF,
0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
})
visitRunes(fn, R, []rune{
0x0590, 0x05FF, // Hebrew
0x07C0, 0x089F, // Nko et al.
0xFB1D, 0xFB4F,
		0x00010800, 0x00010FFF, // Cypriot Syllabary et al.
0x0001E800, 0x0001EDFF,
0x0001EF00, 0x0001EFFF,
})
visitRunes(fn, ET, []rune{ // European Terminator
		0x20A0, 0x20CF, // Currency symbols
})
rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
fn(r, BN) // Boundary Neutral
})
ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
if p.String(1) == "Default_Ignorable_Code_Point" {
fn(p.Rune(0), BN) // Boundary Neutral
}
})
}
func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
for i := 0; i < len(runes); i += 2 {
lo, hi := runes[i], runes[i+1]
for j := lo; j <= hi; j++ {
fn(j, c)
}
}
}

64
vendor/golang.org/x/text/unicode/bidi/gen_trieval.go generated vendored Normal file

@ -0,0 +1,64 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint
const (
L Class = iota // LeftToRight
R // RightToLeft
EN // EuropeanNumber
ES // EuropeanSeparator
ET // EuropeanTerminator
AN // ArabicNumber
CS // CommonSeparator
B // ParagraphSeparator
S // SegmentSeparator
WS // WhiteSpace
ON // OtherNeutral
BN // BoundaryNeutral
NSM // NonspacingMark
AL // ArabicLetter
Control // Control LRO - PDI
numClass
LRO // LeftToRightOverride
RLO // RightToLeftOverride
LRE // LeftToRightEmbedding
RLE // RightToLeftEmbedding
PDF // PopDirectionalFormat
LRI // LeftToRightIsolate
RLI // RightToLeftIsolate
FSI // FirstStrongIsolate
PDI // PopDirectionalIsolate
unknownClass = ^Class(0)
)
var controlToClass = map[rune]Class{
0x202D: LRO, // LeftToRightOverride,
0x202E: RLO, // RightToLeftOverride,
0x202A: LRE, // LeftToRightEmbedding,
0x202B: RLE, // RightToLeftEmbedding,
0x202C: PDF, // PopDirectionalFormat,
0x2066: LRI, // LeftToRightIsolate,
0x2067: RLI, // RightToLeftIsolate,
0x2068: FSI, // FirstStrongIsolate,
0x2069: PDI, // PopDirectionalIsolate,
}
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
// 3..0 Class type
const (
openMask = 0x10
xorMaskShift = 5
)
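// Editor's note, a worked decoding of the layout above (illustrative, not
// part of the original source): an entry of 0x3A has class bits 3..0 = 0xA
// (ON), bit 4 set (an opening bracket), and bits 7..5 = 1, so xorMasks[1]
// holds the mask that maps this bracket to its counterpart.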

206
vendor/golang.org/x/text/unicode/bidi/prop.go generated vendored Normal file

@ -0,0 +1,206 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bidi
import "unicode/utf8"
// Properties provides access to BiDi properties of runes.
type Properties struct {
entry uint8
last uint8
}
var trie = newBidiTrie(0)
// TODO: using this for bidirule reduces the running time by about 5%. Consider
// if this is worth exposing or if we can find a way to speed up the Class
// method.
//
// // CompactClass is like Class, but maps all of the BiDi control classes
// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control.
// func (p Properties) CompactClass() Class {
// return Class(p.entry & 0x0F)
// }
// Class returns the Bidi class for p.
func (p Properties) Class() Class {
c := Class(p.entry & 0x0F)
if c == Control {
c = controlByteToClass[p.last&0xF]
}
return c
}
// IsBracket reports whether the rune is a bracket.
func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 }
// IsOpeningBracket reports whether the rune is an opening bracket.
// IsBracket must return true.
func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 }
// TODO: find a better API and expose.
func (p Properties) reverseBracket(r rune) rune {
return xorMasks[p.entry>>xorMaskShift] ^ r
}
var controlByteToClass = [16]Class{
0xD: LRO, // U+202D LeftToRightOverride,
0xE: RLO, // U+202E RightToLeftOverride,
0xA: LRE, // U+202A LeftToRightEmbedding,
0xB: RLE, // U+202B RightToLeftEmbedding,
0xC: PDF, // U+202C PopDirectionalFormat,
0x6: LRI, // U+2066 LeftToRightIsolate,
0x7: RLI, // U+2067 RightToLeftIsolate,
0x8: FSI, // U+2068 FirstStrongIsolate,
0x9: PDI, // U+2069 PopDirectionalIsolate,
}
// LookupRune returns properties for r.
func LookupRune(r rune) (p Properties, size int) {
var buf [4]byte
n := utf8.EncodeRune(buf[:], r)
return Lookup(buf[:n])
}
// TODO: these lookup methods are based on the generated trie code. The returned
// sizes have slightly different semantics from the generated code, in that it
// always returns size==1 for an illegal UTF-8 byte (instead of the length
// of the maximum invalid subsequence). Most Transformers, like unicode/norm,
// leave invalid UTF-8 untouched, in which case it has performance benefits to
// do so (without changing the semantics). Bidi requires the semantics used here
// for the bidirule implementation to be compatible with the Go semantics.
// They ultimately should perhaps be adopted by all trie implementations, for
// convenience's sake.
// This unrolled code also boosts performance of the secure/bidirule package by
// about 30%.
// So, to remove this code:
// - add option to trie generator to define return type.
// - always return 1 byte size for ill-formed UTF-8 runes.
// Lookup returns properties for the first rune in s and the width in bytes of
// its encoding. The size will be 0 if s does not hold enough bytes to complete
// the encoding.
func Lookup(s []byte) (p Properties, sz int) {
c0 := s[0]
switch {
case c0 < 0x80: // is ASCII
return Properties{entry: bidiValues[c0]}, 1
case c0 < 0xC2:
return Properties{}, 1
case c0 < 0xE0: // 2-byte UTF-8
if len(s) < 2 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
case c0 < 0xF0: // 3-byte UTF-8
if len(s) < 3 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
case c0 < 0xF8: // 4-byte UTF-8
if len(s) < 4 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
o = uint32(i)<<6 + uint32(c2)
i = bidiIndex[o]
c3 := s[3]
if c3 < 0x80 || 0xC0 <= c3 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
}
// Illegal rune
return Properties{}, 1
}
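// Illustrative sketch of the size semantics described in the TODO above
// (same assumed import): an ill-formed lead byte reports size 1, while a
// truncated but potentially valid sequence reports size 0.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	_, sz := bidi.Lookup([]byte{0xFF}) // ill-formed lead byte
	fmt.Println(sz)                    // 1
	_, sz = bidi.Lookup([]byte{0xD7})  // incomplete 2-byte sequence
	fmt.Println(sz)                    // 0
}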
// LookupString returns properties for the first rune in s and the width in
// bytes of its encoding. The size will be 0 if s does not hold enough bytes to
// complete the encoding.
func LookupString(s string) (p Properties, sz int) {
c0 := s[0]
switch {
case c0 < 0x80: // is ASCII
return Properties{entry: bidiValues[c0]}, 1
case c0 < 0xC2:
return Properties{}, 1
case c0 < 0xE0: // 2-byte UTF-8
if len(s) < 2 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
case c0 < 0xF0: // 3-byte UTF-8
if len(s) < 3 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
case c0 < 0xF8: // 4-byte UTF-8
if len(s) < 4 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
o = uint32(i)<<6 + uint32(c2)
i = bidiIndex[o]
c3 := s[3]
if c3 < 0x80 || 0xC0 <= c3 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
}
// Illegal rune
return Properties{}, 1
}

1779
vendor/golang.org/x/text/unicode/bidi/tables.go generated vendored Normal file

File diff suppressed because it is too large

60
vendor/golang.org/x/text/unicode/bidi/trieval.go generated vendored Normal file

@@ -0,0 +1,60 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package bidi
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint
const (
L Class = iota // LeftToRight
R // RightToLeft
EN // EuropeanNumber
ES // EuropeanSeparator
ET // EuropeanTerminator
AN // ArabicNumber
CS // CommonSeparator
B // ParagraphSeparator
S // SegmentSeparator
WS // WhiteSpace
ON // OtherNeutral
BN // BoundaryNeutral
NSM // NonspacingMark
AL // ArabicLetter
Control // Control LRO - PDI
numClass
LRO // LeftToRightOverride
RLO // RightToLeftOverride
LRE // LeftToRightEmbedding
RLE // RightToLeftEmbedding
PDF // PopDirectionalFormat
LRI // LeftToRightIsolate
RLI // RightToLeftIsolate
FSI // FirstStrongIsolate
PDI // PopDirectionalIsolate
unknownClass = ^Class(0)
)
var controlToClass = map[rune]Class{
0x202D: LRO, // LeftToRightOverride,
0x202E: RLO, // RightToLeftOverride,
0x202A: LRE, // LeftToRightEmbedding,
0x202B: RLE, // RightToLeftEmbedding,
0x202C: PDF, // PopDirectionalFormat,
0x2066: LRI, // LeftToRightIsolate,
0x2067: RLI, // RightToLeftIsolate,
0x2068: FSI, // FirstStrongIsolate,
0x2069: PDI, // PopDirectionalIsolate,
}
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
// 3..0 Class type
const (
openMask = 0x10
xorMaskShift = 5
)
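// Illustrative sketch, not part of the vendored file: a hypothetical entry
// value decoded per the layout above. These constants are unexported, so
// this would only compile inside package bidi.
// An opening bracket of class ON with XOR-mask index 1 encodes as:
const exampleEntry = uint8(1)<<xorMaskShift | openMask | uint8(ON) // 0x3A

func decodeExampleEntry() (Class, bool, bool, uint8) {
	class := Class(exampleEntry & 0x0F)     // bits 3..0: ON
	isBracket := exampleEntry&0xF0 != 0     // bits 7..4 non-zero: a bracket
	isOpening := exampleEntry&openMask != 0 // bit 4: opening
	xor := exampleEntry >> xorMaskShift     // bits 7..5: XOR-mask index 1
	return class, isBracket, isOpening, xor
}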

514
vendor/golang.org/x/text/unicode/norm/composition.go generated vendored Normal file

@@ -0,0 +1,514 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "unicode/utf8"
const (
maxNonStarters = 30
// The maximum number of characters needed for a buffer is
// maxNonStarters + 1 for the starter + 1 for the CGJ
maxBufferSize = maxNonStarters + 2
maxNFCExpansion = 3 // NFC(0x1D160)
maxNFKCExpansion = 18 // NFKC(0xFDFA)
maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128
)
// ssState is used for reporting the segment state after inserting a rune.
// It is returned by streamSafe.next.
type ssState int
const (
// Indicates a rune was successfully added to the segment.
ssSuccess ssState = iota
// Indicates a rune starts a new segment and should not be added.
ssStarter
// Indicates a rune caused a segment overflow and a CGJ should be inserted.
ssOverflow
)
// streamSafe implements the policy of when a CGJ should be inserted.
type streamSafe uint8
// mkStreamSafe is a shorthand for declaring a streamSafe var and calling
// first on it.
func mkStreamSafe(p Properties) streamSafe {
return streamSafe(p.nTrailingNonStarters())
}
// first inserts the first rune of a segment.
func (ss *streamSafe) first(p Properties) {
if *ss != 0 {
panic("!= 0")
}
*ss = streamSafe(p.nTrailingNonStarters())
}
// next returns an ssState value to indicate whether a rune represented by p
// can be inserted.
func (ss *streamSafe) next(p Properties) ssState {
if *ss > maxNonStarters {
panic("streamSafe was not reset")
}
n := p.nLeadingNonStarters()
if *ss += streamSafe(n); *ss > maxNonStarters {
*ss = 0
return ssOverflow
}
// The Stream-Safe Text Processing prescribes that the counting can stop
// as soon as a starter is encountered. However, there are some starters,
// like Jamo V and T, that can combine with other runes, leaving their
// successive non-starters appended to the previous, possibly causing an
// overflow. We will therefore consider any rune with a non-zero nLead to
// be a non-starter. Note that it always holds that if nLead > 0 then
// nLead == nTrail.
if n == 0 {
*ss = 0
return ssStarter
}
return ssSuccess
}
// backwards is used for checking for overflow and segment starts
// when traversing a string backwards. Users do not need to call first
// for the first rune. The state of the streamSafe retains the count of
// the non-starters loaded.
func (ss *streamSafe) backwards(p Properties) ssState {
if *ss > maxNonStarters {
panic("streamSafe was not reset")
}
c := *ss + streamSafe(p.nTrailingNonStarters())
if c > maxNonStarters {
return ssOverflow
}
*ss = c
if p.nLeadingNonStarters() == 0 {
return ssStarter
}
return ssSuccess
}
func (ss streamSafe) isMax() bool {
return ss == maxNonStarters
}
// GraphemeJoiner is inserted after maxNonStarters non-starter runes.
const GraphemeJoiner = "\u034F"
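// Illustrative sketch via the public API, not part of the vendored file:
// normalizing a run of more than maxNonStarters (30) combining marks makes
// the stream-safe logic above insert a CGJ.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	s := "e" + strings.Repeat("\u0301", 31) // 31 combining acute accents
	out := norm.NFC.String(s)
	fmt.Println(strings.Contains(out, norm.GraphemeJoiner)) // true
}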
// reorderBuffer is used to normalize a single segment. Characters inserted with
// insert are decomposed and reordered based on CCC. The compose method can
// be used to recombine characters. Note that the byte buffer does not hold
// the UTF-8 characters in order. Only the rune array is maintained in sorted
// order. flush writes the resulting segment to a byte array.
type reorderBuffer struct {
rune [maxBufferSize]Properties // Per character info.
byte [maxByteBufferSize]byte // UTF-8 buffer. Referenced by runeInfo.pos.
nbyte uint8 // Number of bytes.
ss streamSafe // For limiting length of non-starter sequence.
nrune int // Number of runeInfos.
f formInfo
src input
nsrc int
tmpBytes input
out []byte
flushF func(*reorderBuffer) bool
}
func (rb *reorderBuffer) init(f Form, src []byte) {
rb.f = *formTable[f]
rb.src.setBytes(src)
rb.nsrc = len(src)
rb.ss = 0
}
func (rb *reorderBuffer) initString(f Form, src string) {
rb.f = *formTable[f]
rb.src.setString(src)
rb.nsrc = len(src)
rb.ss = 0
}
func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
rb.out = out
rb.flushF = f
}
// reset discards all characters from the buffer.
func (rb *reorderBuffer) reset() {
rb.nrune = 0
rb.nbyte = 0
rb.ss = 0
}
func (rb *reorderBuffer) doFlush() bool {
if rb.f.composing {
rb.compose()
}
res := rb.flushF(rb)
rb.reset()
return res
}
// appendFlush appends the normalized segment to rb.out.
func appendFlush(rb *reorderBuffer) bool {
for i := 0; i < rb.nrune; i++ {
start := rb.rune[i].pos
end := start + rb.rune[i].size
rb.out = append(rb.out, rb.byte[start:end]...)
}
return true
}
// flush appends the normalized segment to out and resets rb.
func (rb *reorderBuffer) flush(out []byte) []byte {
for i := 0; i < rb.nrune; i++ {
start := rb.rune[i].pos
end := start + rb.rune[i].size
out = append(out, rb.byte[start:end]...)
}
rb.reset()
return out
}
// flushCopy copies the normalized segment to buf and resets rb.
// It returns the number of bytes written to buf.
func (rb *reorderBuffer) flushCopy(buf []byte) int {
p := 0
for i := 0; i < rb.nrune; i++ {
runep := rb.rune[i]
p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size])
}
rb.reset()
return p
}
// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class.
// Callers must ensure the buffer is large enough to hold the rune.
// It is used internally by insert and insertString only.
func (rb *reorderBuffer) insertOrdered(info Properties) {
n := rb.nrune
b := rb.rune[:]
cc := info.ccc
if cc > 0 {
// Find insertion position + move elements to make room.
for ; n > 0; n-- {
if b[n-1].ccc <= cc {
break
}
b[n] = b[n-1]
}
}
rb.nrune++
pos := uint8(rb.nbyte)
rb.nbyte += utf8.UTFMax
info.pos = pos
b[n] = info
}
// insertErr is an error code returned by insert. Using this type instead
// of error improves performance up to 20% for many of the benchmarks.
type insertErr int
const (
iSuccess insertErr = -iota
iShortDst
iShortSrc
)
// insertFlush inserts the given rune in the buffer ordered by CCC.
// If a decomposition with multiple segments is encountered, the leading
// ones are flushed.
// It returns a non-zero error code if the rune was not inserted.
func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr {
if rune := src.hangul(i); rune != 0 {
rb.decomposeHangul(rune)
return iSuccess
}
if info.hasDecomposition() {
return rb.insertDecomposed(info.Decomposition())
}
rb.insertSingle(src, i, info)
return iSuccess
}
// insertUnsafe inserts the given rune in the buffer ordered by CCC.
// It is assumed there is sufficient space to hold the runes. It is the
// responsibility of the caller to ensure this. This can be done by checking
// the state returned by the streamSafe type.
func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
if rune := src.hangul(i); rune != 0 {
rb.decomposeHangul(rune)
}
if info.hasDecomposition() {
// TODO: inline.
rb.insertDecomposed(info.Decomposition())
} else {
rb.insertSingle(src, i, info)
}
}
// insertDecomposed inserts an entry into the reorderBuffer for each rune
// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
// It flushes the buffer on each new segment start.
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
rb.tmpBytes.setBytes(dcomp)
for i := 0; i < len(dcomp); {
info := rb.f.info(rb.tmpBytes, i)
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
return iShortDst
}
i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)])
rb.insertOrdered(info)
}
return iSuccess
}
// insertSingle inserts an entry in the reorderBuffer for the rune at
// position i. info is the runeInfo for the rune at position i.
func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) {
src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size))
rb.insertOrdered(info)
}
// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb.
func (rb *reorderBuffer) insertCGJ() {
rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))})
}
// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
func (rb *reorderBuffer) appendRune(r rune) {
bn := rb.nbyte
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.nbyte += utf8.UTFMax
rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)}
rb.nrune++
}
// assignRune sets a rune at position pos. It is used for Hangul and recomposition.
func (rb *reorderBuffer) assignRune(pos int, r rune) {
bn := rb.rune[pos].pos
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.rune[pos] = Properties{pos: bn, size: uint8(sz)}
}
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
func (rb *reorderBuffer) runeAt(n int) rune {
inf := rb.rune[n]
r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
return r
}
// bytesAt returns the UTF-8 encoding of the rune at position n.
// It is used for Hangul and recomposition.
func (rb *reorderBuffer) bytesAt(n int) []byte {
inf := rb.rune[n]
return rb.byte[inf.pos : int(inf.pos)+int(inf.size)]
}
// For Hangul we combine algorithmically, instead of using tables.
const (
hangulBase = 0xAC00 // UTF-8(hangulBase) -> EA B0 80
hangulBase0 = 0xEA
hangulBase1 = 0xB0
hangulBase2 = 0x80
hangulEnd = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4
hangulEnd0 = 0xED
hangulEnd1 = 0x9E
hangulEnd2 = 0xA4
jamoLBase = 0x1100 // UTF-8(jamoLBase) -> E1 84 00
jamoLBase0 = 0xE1
jamoLBase1 = 0x84
jamoLEnd = 0x1113
jamoVBase = 0x1161
jamoVEnd = 0x1176
jamoTBase = 0x11A7
jamoTEnd = 0x11C3
jamoTCount = 28
jamoVCount = 21
jamoVTCount = 21 * 28
jamoLVTCount = 19 * 21 * 28
)
const hangulUTF8Size = 3
func isHangul(b []byte) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
}
b1 := b[1]
switch {
case b0 == hangulBase0:
return b1 >= hangulBase1
case b0 < hangulEnd0:
return true
case b0 > hangulEnd0:
return false
case b1 < hangulEnd1:
return true
}
return b1 == hangulEnd1 && b[2] < hangulEnd2
}
func isHangulString(b string) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
}
b1 := b[1]
switch {
case b0 == hangulBase0:
return b1 >= hangulBase1
case b0 < hangulEnd0:
return true
case b0 > hangulEnd0:
return false
case b1 < hangulEnd1:
return true
}
return b1 == hangulEnd1 && b[2] < hangulEnd2
}
// Caller must ensure len(b) >= 2.
func isJamoVT(b []byte) bool {
// True if (rune & 0xff00) == jamoLBase
return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1
}
func isHangulWithoutJamoT(b []byte) bool {
c, _ := utf8.DecodeRune(b)
c -= hangulBase
return c < jamoLVTCount && c%jamoTCount == 0
}
// decomposeHangul writes the decomposed Hangul to buf and returns the number
// of bytes written. len(buf) should be at least 9.
func decomposeHangul(buf []byte, r rune) int {
const JamoUTF8Len = 3
r -= hangulBase
x := r % jamoTCount
r /= jamoTCount
utf8.EncodeRune(buf, jamoLBase+r/jamoVCount)
utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount)
if x != 0 {
utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x)
return 3 * JamoUTF8Len
}
return 2 * JamoUTF8Len
}
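// Illustrative sketch via the public API, not part of the vendored file:
// for 한 (U+D55C), r-hangulBase = 0x295C, giving T index 4, V index 0, and
// L index 18, i.e. the Jamo sequence U+1112 U+1161 U+11AB.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	fmt.Printf("%U\n", []rune(norm.NFD.String("한"))) // [U+1112 U+1161 U+11AB]
}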
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
func (rb *reorderBuffer) decomposeHangul(r rune) {
r -= hangulBase
x := r % jamoTCount
r /= jamoTCount
rb.appendRune(jamoLBase + r/jamoVCount)
rb.appendRune(jamoVBase + r%jamoVCount)
if x != 0 {
rb.appendRune(jamoTBase + x)
}
}
// combineHangul algorithmically combines Jamo character components into Hangul.
// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
func (rb *reorderBuffer) combineHangul(s, i, k int) {
b := rb.rune[:]
bn := rb.nrune
for ; i < bn; i++ {
cccB := b[k-1].ccc
cccC := b[i].ccc
if cccB == 0 {
s = k - 1
}
if s != k-1 && cccB >= cccC {
// b[i] is blocked by greater-equal cccX below it
b[k] = b[i]
k++
} else {
l := rb.runeAt(s) // also used to compare to hangulBase
v := rb.runeAt(i) // also used to compare to jamoT
switch {
case jamoLBase <= l && l < jamoLEnd &&
jamoVBase <= v && v < jamoVEnd:
// 11xx plus 116x to LV
rb.assignRune(s, hangulBase+
(l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount)
case hangulBase <= l && l < hangulEnd &&
jamoTBase < v && v < jamoTEnd &&
((l-hangulBase)%jamoTCount) == 0:
// ACxx plus 11Ax to LVT
rb.assignRune(s, l+v-jamoTBase)
default:
b[k] = b[i]
k++
}
}
}
rb.nrune = k
}
// compose recombines the runes in the buffer.
// It should only be used to recompose a single segment, as it will not
// handle alternations between Hangul and non-Hangul characters correctly.
func (rb *reorderBuffer) compose() {
// UAX #15, section X5, including Corrigendum #5
// "In any character sequence beginning with starter S, a character C is
// blocked from S if and only if there is some character B between S
// and C, and either B is a starter or it has the same or higher
// combining class as C."
bn := rb.nrune
if bn == 0 {
return
}
k := 1
b := rb.rune[:]
for s, i := 0, 1; i < bn; i++ {
if isJamoVT(rb.bytesAt(i)) {
// Redo from start in Hangul mode. Necessary to support
// U+320E..U+321E in NFKC mode.
rb.combineHangul(s, i, k)
return
}
ii := b[i]
// We can only use combineForward as a filter if we later
// get the info for the combined character. This is more
// expensive than using the filter. Using combinesBackward()
// is safe.
if ii.combinesBackward() {
cccB := b[k-1].ccc
cccC := ii.ccc
blocked := false // b[i] blocked by starter or greater or equal CCC?
if cccB == 0 {
s = k - 1
} else {
blocked = s != k-1 && cccB >= cccC
}
if !blocked {
combined := combine(rb.runeAt(s), rb.runeAt(i))
if combined != 0 {
rb.assignRune(s, combined)
continue
}
}
}
b[k] = b[i]
k++
}
rb.nrune = k
}
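// Illustrative sketch via the public API, not part of the vendored file:
// the composing direction recombines a starter with a following modifier
// when a precomposed form exists.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	s := "e\u0301" // 'e' + COMBINING ACUTE ACCENT
	fmt.Printf("%q %U\n", norm.NFC.String(s), []rune(norm.NFC.String(s)))
	// "é" [U+00E9]
}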

259
vendor/golang.org/x/text/unicode/norm/forminfo.go generated vendored Normal file

@@ -0,0 +1,259 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
// This file contains Form-specific logic and wrappers for data in tables.go.
// Rune info is stored in a separate trie per composing form. A composing form
// and its corresponding decomposing form share the same trie. Each trie maps
// a rune to a uint16. The values take two forms. For v >= 0x8000:
// bits
// 15: 1 (inverse of NFD_QC bit of qcInfo)
// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
// 6..0: ccc (compressed CCC value).
// For v < 0x8000, the respective rune has a decomposition and v is an index
// into a byte array of UTF-8 decomposition sequences and additional info and
// has the form:
// <header> <decomp_byte>* [<tccc> [<lccc>]]
// The header contains the number of bytes in the decomposition (excluding this
// length byte). The two most significant bits of this length byte correspond
// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
// The byte sequence is followed by a trailing and leading CCC if the values
// for these are not zero. The value of v determines which ccc are appended
// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
// the sequence is followed by a trailing ccc, and for v >= firstLeadingCCC
// there is an additional leading ccc. The value of tccc itself is the
// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
// are the number of trailing non-starters.
const (
qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
headerLenMask = 0x3F // extract the length value from the header byte
headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
)
// Properties provides access to normalization properties of a rune.
type Properties struct {
pos uint8 // start position in reorderBuffer; used in composition.go
size uint8 // length of UTF-8 encoding of this rune
ccc uint8 // leading canonical combining class (ccc if not decomposition)
tccc uint8 // trailing canonical combining class (ccc if not decomposition)
nLead uint8 // number of leading non-starters.
flags qcInfo // quick check flags
index uint16
}
// functions dispatchable per form
type lookupFunc func(b input, i int) Properties
// formInfo holds Form-specific functions and tables.
type formInfo struct {
form Form
composing, compatibility bool // form type
info lookupFunc
nextMain iterFunc
}
var formTable = []*formInfo{{
form: NFC,
composing: true,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextComposed,
}, {
form: NFD,
composing: false,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextDecomposed,
}, {
form: NFKC,
composing: true,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextComposed,
}, {
form: NFKD,
composing: false,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextDecomposed,
}}
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
// unexpected behavior for the user. For example, in NFD, there is a boundary
// after 'a'. However, 'a' might combine with modifiers, so from the application's
// perspective it is not a good boundary. We will therefore always use the
// boundaries for the combining variants.
// BoundaryBefore returns true if this rune starts a new segment and
// cannot combine with any rune on the left.
func (p Properties) BoundaryBefore() bool {
if p.ccc == 0 && !p.combinesBackward() {
return true
}
// We assume that the CCC of the first character in a decomposition
// is always non-zero if different from info.ccc and that we can return
// false at this point. This is verified by maketables.
return false
}
// BoundaryAfter returns true if runes cannot combine with or otherwise
// interact with this or previous runes.
func (p Properties) BoundaryAfter() bool {
// TODO: loosen these conditions.
return p.isInert()
}
// We pack quick check data in 6 bits:
// 5: Combines forward (0 == false, 1 == true)
// 4..3: NFC_QC Yes (00), No (10), or Maybe (11)
// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
// 1..0: Number of trailing non-starters.
//
// When all 6 bits are zero, the character is inert, meaning it is never
// influenced by normalization.
type qcInfo uint8
func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }
func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 }
func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD
func (p Properties) isInert() bool {
return p.flags&qcInfoMask == 0 && p.ccc == 0
}
func (p Properties) multiSegment() bool {
return p.index >= firstMulti && p.index < endMulti
}
func (p Properties) nLeadingNonStarters() uint8 {
return p.nLead
}
func (p Properties) nTrailingNonStarters() uint8 {
return uint8(p.flags & 0x03)
}
// Decomposition returns the decomposition for the underlying rune
// or nil if there is none.
func (p Properties) Decomposition() []byte {
// TODO: create the decomposition for Hangul?
if p.index == 0 {
return nil
}
i := p.index
n := decomps[i] & headerLenMask
i++
return decomps[i : i+uint16(n)]
}
// Size returns the length of UTF-8 encoding of the rune.
func (p Properties) Size() int {
return int(p.size)
}
// CCC returns the canonical combining class of the underlying rune.
func (p Properties) CCC() uint8 {
if p.index >= firstCCCZeroExcept {
return 0
}
return ccc[p.ccc]
}
// LeadCCC returns the CCC of the first rune in the decomposition.
// If there is no decomposition, LeadCCC equals CCC.
func (p Properties) LeadCCC() uint8 {
return ccc[p.ccc]
}
// TrailCCC returns the CCC of the last rune in the decomposition.
// If there is no decomposition, TrailCCC equals CCC.
func (p Properties) TrailCCC() uint8 {
return ccc[p.tccc]
}
// Recomposition
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
// This clips off the bits of three entries, but we know this will not
// result in a collision. In the unlikely event that changes to
// UnicodeData.txt introduce collisions, the compiler will catch it.
// Note that the recomposition map for NFC and NFKC are identical.
// combine returns the combined rune or 0 if it doesn't exist.
func combine(a, b rune) rune {
key := uint32(uint16(a))<<16 + uint32(uint16(b))
return recompMap[key]
}
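// Illustrative sketch, not part of the vendored file: the key packing is
// plain arithmetic. The canonical pair (U+0041 'A', U+0300 COMBINING GRAVE
// ACCENT) recomposes to U+00C0 'À'; its key packs as follows.
package main

import "fmt"

func main() {
	a, b := rune(0x0041), rune(0x0300)
	key := uint32(uint16(a))<<16 + uint32(uint16(b))
	fmt.Printf("%#010x\n", key) // 0x00410300
}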
func lookupInfoNFC(b input, i int) Properties {
v, sz := b.charinfoNFC(i)
return compInfo(v, sz)
}
func lookupInfoNFKC(b input, i int) Properties {
v, sz := b.charinfoNFKC(i)
return compInfo(v, sz)
}
// Properties returns properties for the first rune in s.
func (f Form) Properties(s []byte) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookup(s))
}
return compInfo(nfkcData.lookup(s))
}
// PropertiesString returns properties for the first rune in s.
func (f Form) PropertiesString(s string) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookupString(s))
}
return compInfo(nfkcData.lookupString(s))
}
// compInfo converts the information contained in v and sz
// to a Properties. See the comment at the top of the file
// for more information on the format.
func compInfo(v uint16, sz int) Properties {
if v == 0 {
return Properties{size: uint8(sz)}
} else if v >= 0x8000 {
p := Properties{
size: uint8(sz),
ccc: uint8(v),
tccc: uint8(v),
flags: qcInfo(v >> 8),
}
if p.ccc > 0 || p.combinesBackward() {
p.nLead = uint8(p.flags & 0x3)
}
return p
}
// has decomposition
h := decomps[v]
f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
p := Properties{size: uint8(sz), flags: f, index: v}
if v >= firstCCC {
v += uint16(h&headerLenMask) + 1
c := decomps[v]
p.tccc = c >> 2
p.flags |= qcInfo(c & 0x3)
if v >= firstLeadingCCC {
p.nLead = c & 0x3
if v >= firstStarterWithNLead {
// We were tricked. Remove the decomposition.
p.flags &= 0x03
p.index = 0
return p
}
p.ccc = decomps[v+1]
}
}
return p
}
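// Illustrative sketch via the exported accessors, not part of the vendored
// file; the expected values follow from the format description at the top
// of this file.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	p := norm.NFD.PropertiesString("\u00E9") // é
	fmt.Printf("size=%d ccc=%d decomp=% X\n",
		p.Size(), p.CCC(), p.Decomposition())
	// size=2 ccc=0 decomp=65 CC 81 ('e' followed by U+0301)
}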

105
vendor/golang.org/x/text/unicode/norm/input.go generated vendored Normal file

@@ -0,0 +1,105 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "unicode/utf8"
type input struct {
str string
bytes []byte
}
func inputBytes(str []byte) input {
return input{bytes: str}
}
func inputString(str string) input {
return input{str: str}
}
func (in *input) setBytes(str []byte) {
in.str = ""
in.bytes = str
}
func (in *input) setString(str string) {
in.str = str
in.bytes = nil
}
func (in *input) _byte(p int) byte {
if in.bytes == nil {
return in.str[p]
}
return in.bytes[p]
}
func (in *input) skipASCII(p, max int) int {
if in.bytes == nil {
for ; p < max && in.str[p] < utf8.RuneSelf; p++ {
}
} else {
for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ {
}
}
return p
}
func (in *input) skipContinuationBytes(p int) int {
if in.bytes == nil {
for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ {
}
} else {
for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ {
}
}
return p
}
func (in *input) appendSlice(buf []byte, b, e int) []byte {
if in.bytes != nil {
return append(buf, in.bytes[b:e]...)
}
for i := b; i < e; i++ {
buf = append(buf, in.str[i])
}
return buf
}
func (in *input) copySlice(buf []byte, b, e int) int {
if in.bytes == nil {
return copy(buf, in.str[b:e])
}
return copy(buf, in.bytes[b:e])
}
func (in *input) charinfoNFC(p int) (uint16, int) {
if in.bytes == nil {
return nfcData.lookupString(in.str[p:])
}
return nfcData.lookup(in.bytes[p:])
}
func (in *input) charinfoNFKC(p int) (uint16, int) {
if in.bytes == nil {
return nfkcData.lookupString(in.str[p:])
}
return nfkcData.lookup(in.bytes[p:])
}
func (in *input) hangul(p int) (r rune) {
if in.bytes == nil {
if !isHangulString(in.str[p:]) {
return 0
}
r, _ = utf8.DecodeRuneInString(in.str[p:])
} else {
if !isHangul(in.bytes[p:]) {
return 0
}
r, _ = utf8.DecodeRune(in.bytes[p:])
}
return r
}

450
vendor/golang.org/x/text/unicode/norm/iter.go generated vendored Normal file

@@ -0,0 +1,450 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"unicode/utf8"
)
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
rb reorderBuffer
buf [maxByteBufferSize]byte
info Properties // first character saved from previous iteration
next iterFunc // implementation of next depends on form
asciiF iterFunc
p int // current position in input source
multiSeg []byte // remainder of multi-segment decomposition
}
type iterFunc func(*Iter) []byte
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.init(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.initString(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
}
// Seek sets the segment to be returned by the next call to Next to start
// at the given position. It is the responsibility of the caller to set the
// position to the start of a UTF-8 rune.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(i.p) + offset
case 2:
abs = int64(i.rb.nsrc) + offset
default:
return 0, fmt.Errorf("norm: invalid whence")
}
if abs < 0 {
return 0, fmt.Errorf("norm: negative position")
}
if int(abs) >= i.rb.nsrc {
i.setDone()
return int64(i.p), nil
}
i.p = int(abs)
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
return abs, nil
}
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying input is of type []byte, it simply returns a subslice.
// If the underlying input is of type string, it copies the slice into the
// buffer and returns that.
func (i *Iter) returnSlice(a, b int) []byte {
if i.rb.src.bytes == nil {
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
}
return i.rb.src.bytes[a:b]
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
return i.p
}
func (i *Iter) setDone() {
i.next = nextDone
i.p = i.rb.nsrc
}
// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
return i.p >= i.rb.nsrc
}
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
return i.next(i)
}
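// Illustrative sketch, not part of the vendored file: the intended driving
// loop for an Iter.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	var it norm.Iter
	it.InitString(norm.NFD, "résumé")
	for !it.Done() {
		fmt.Printf("%q ", it.Next()) // one normalized segment per call
	}
	fmt.Println() // segments: r, e+U+0301, s, u, m, e+U+0301
}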
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.setDone()
return i.rb.src.bytes[i.p:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
i.p = p
return i.rb.src.bytes[p0:p]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextASCIIString(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.buf[0] = i.rb.src.str[i.p]
i.setDone()
return i.buf[:1]
}
if i.rb.src.str[p] < utf8.RuneSelf {
i.buf[0] = i.rb.src.str[i.p]
i.p = p
return i.buf[:1]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextHangul(i *Iter) []byte {
p := i.p
next := p + hangulUTF8Size
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
i.p = next
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}
func nextDone(i *Iter) []byte {
return nil
}
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
j := 0
d := i.multiSeg
// skip first rune
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
}
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.multiSeg = d[j:]
return d[:j]
}
j += int(info.size)
}
// treat last segment as normal decomposition
i.next = i.rb.f.nextMain
return i.next(i)
}
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
j := 0
d := i.multiSeg
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.ss.first(info)
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.ss.next(info)
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
i.multiSeg = nil
i.next = nextComposed
return doNormComposed(i)
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
ss := mkStreamSafe(i.info)
for {
if sz := int(i.info.size); sz <= 1 {
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1.
if i.p >= i.rb.nsrc {
i.setDone()
return i.returnSlice(p, i.p)
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
return i.returnSlice(p, i.p)
}
outp++
} else if d := i.info.Decomposition(); d != nil {
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
// Case 1: there is a leftover to copy. In this case the decomposition
// must begin with a modifier and should always be appended.
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
if p > len(i.buf) {
return i.buf[:outp]
}
} else if i.info.multiSegment() {
// outp must be 0 as multi-segment decompositions always
// start a new segment.
if i.multiSeg == nil {
i.multiSeg = d
i.next = nextMulti
return nextMulti(i)
}
// We are in the last segment. Treat as normal decomposition.
d = i.multiSeg
i.multiSeg = nil
p = len(d)
}
prevCC := i.info.tccc
if i.p += sz; i.p >= i.rb.nsrc {
i.setDone()
i.info = Properties{} // Force BoundaryBefore to succeed.
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
case ssStarter:
if outp > 0 {
copy(i.buf[outp:], d)
return i.buf[:p]
}
return d
}
copy(i.buf[outp:], d)
outp = p
inCopyStart, outCopyStart = i.p, outp
if i.info.ccc < prevCC {
goto doNorm
}
continue
} else if r := i.rb.src.hangul(i.p); r != 0 {
outp = decomposeHangul(i.buf[:], r)
i.p += hangulUTF8Size
inCopyStart, outCopyStart = i.p, outp
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src.hangul(i.p) != 0 {
i.next = nextHangul
return i.buf[:outp]
}
} else {
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
}
if i.p >= i.rb.nsrc {
i.setDone()
break
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
if outCopyStart == 0 {
return i.returnSlice(inCopyStart, i.p)
} else if inCopyStart < i.p {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
}
return i.buf[:outp]
doNorm:
// Insert what we have decomposed so far in the reorderBuffer.
// As we will only reorder, there will always be enough room.
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
i.rb.insertDecomposed(i.buf[0:outp])
return doNormDecomposed(i)
}
func doNormDecomposed(i *Iter) []byte {
for {
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.ccc == 0 {
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
}
func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
buf := doNormDecomposed(i)
return buf
}
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
ss := mkStreamSafe(i.info)
for {
if !i.info.isYesC() {
goto doNorm
}
prevCC = i.info.tccc
sz := int(i.info.size)
if sz == 0 {
sz = 1 // illegal rune: copy byte-by-byte
}
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
return i.returnSlice(startp, i.p)
doNorm:
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)
i.rb.insertUnsafe(input{bytes: d}, 0, info)
i.multiSeg = d[int(info.size):]
i.next = nextMultiNorm
return nextMultiNorm(i)
}
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
func doNormComposed(i *Iter) []byte {
// First rune should already be inserted.
for {
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if s := i.rb.ss.next(i.info); s == ssStarter {
break
} else if s == ssOverflow {
i.next = nextCGJCompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
}
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
return seg
}
func nextCGJCompose(i *Iter) []byte {
i.rb.ss = 0 // instead of first
i.rb.insertCGJ()
i.next = nextComposed
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
// even if it is not. This is particularly dubious for U+FF9E and U+FF9F.
// If we ever change that, insert a check here.
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}

976
vendor/golang.org/x/text/unicode/norm/maketables.go generated vendored Normal file

@@ -0,0 +1,976 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Normalization table generator.
// Data read from the web.
// See forminfo.go for a description of the trie values associated with each rune.
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"sort"
"strconv"
"strings"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
func main() {
gen.Init()
loadUnicodeData()
compactCCC()
loadCompositionExclusions()
completeCharFields(FCanonical)
completeCharFields(FCompatibility)
computeNonStarterCounts()
verifyComputed()
printChars()
testDerived()
printTestdata()
makeTables()
}
var (
tablelist = flag.String("tables",
"all",
"comma-separated list of which tables to generate; "+
"can be 'decomp', 'recomp', 'info' and 'all'")
test = flag.Bool("test",
false,
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
verbose = flag.Bool("verbose",
false,
"write data to stdout as it is parsed")
)
const MaxChar = 0x10FFFF // anything above this shouldn't exist
// Quick Check properties of runes allow us to quickly
// determine whether a rune may occur in a normal form.
// For a given normal form, a rune may be guaranteed to occur
// verbatim (QC=Yes), may or may not combine with another
// rune (QC=Maybe), or may not occur (QC=No).
type QCResult int
const (
QCUnknown QCResult = iota
QCYes
QCNo
QCMaybe
)
func (r QCResult) String() string {
switch r {
case QCYes:
return "Yes"
case QCNo:
return "No"
case QCMaybe:
return "Maybe"
}
return "***UNKNOWN***"
}
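// Illustrative sketch, not part of the vendored file: the quick-check values
// computed here ultimately back fast paths such as Form.IsNormalString,
// which is defined in the package's normalize.go (not shown in this excerpt).
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	fmt.Println(norm.NFC.IsNormalString("\u00E9"))  // true: é is already NFC
	fmt.Println(norm.NFC.IsNormalString("e\u0301")) // false: QC=Maybe, recomposes
}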
const (
FCanonical = iota // NFC or NFD
FCompatibility // NFKC or NFKD
FNumberOfFormTypes
)
const (
MComposed = iota // NFC or NFKC
MDecomposed // NFD or NFKD
MNumberOfModes
)
// This contains only the properties we're interested in.
type Char struct {
name string
codePoint rune // if zero, this index is not a valid code point.
ccc uint8 // canonical combining class
origCCC uint8
excludeInComp bool // from CompositionExclusions.txt
compatDecomp bool // it has a compatibility expansion
nTrailingNonStarters uint8
nLeadingNonStarters uint8 // must be equal to trailing if non-zero
forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
state State
}
var chars = make([]Char, MaxChar+1)
var cccMap = make(map[uint8]uint8)
func (c Char) String() string {
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
fmt.Fprintf(buf, " state: %v\n", c.state)
fmt.Fprintf(buf, " NFC:\n")
fmt.Fprint(buf, c.forms[FCanonical])
fmt.Fprintf(buf, " NFKC:\n")
fmt.Fprint(buf, c.forms[FCompatibility])
return buf.String()
}
// In UnicodeData.txt, some ranges are marked like this:
// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
// parseCharacter keeps a state variable indicating the weirdness.
type State int
const (
SNormal State = iota // known to be zero for the type
SFirst
SLast
SMissing
)
var lastChar = rune('\u0000')
func (c Char) isValid() bool {
return c.codePoint != 0 && c.state != SMissing
}
type FormInfo struct {
quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
verified [MNumberOfModes]bool // index: MComposed or MDecomposed
combinesForward bool // May combine with rune on the right
combinesBackward bool // May combine with rune on the left
isOneWay bool // Never appears in result
inDecomp bool // Some decompositions result in this char.
decomp Decomposition
expandedDecomp Decomposition
}
func (f FormInfo) String() string {
buf := bytes.NewBuffer(make([]byte, 0))
fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
return buf.String()
}
type Decomposition []rune
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
decomp := strings.Split(s, " ")
if len(decomp) > 0 && skipfirst {
decomp = decomp[1:]
}
for _, d := range decomp {
point, err := strconv.ParseUint(d, 16, 64)
if err != nil {
return a, err
}
a = append(a, rune(point))
}
return a, nil
}
func loadUnicodeData() {
f := gen.OpenUCDFile("UnicodeData.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(ucd.CodePoint)
char := &chars[r]
char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
decmap := p.String(ucd.DecompMapping)
exp, err := parseDecomposition(decmap, false)
isCompat := false
if err != nil {
if len(decmap) > 0 {
exp, err = parseDecomposition(decmap, true)
if err != nil {
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
}
isCompat = true
}
}
char.name = p.String(ucd.Name)
char.codePoint = r
char.forms[FCompatibility].decomp = exp
if !isCompat {
char.forms[FCanonical].decomp = exp
} else {
char.compatDecomp = true
}
if len(decmap) > 0 {
char.forms[FCompatibility].decomp = exp
}
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
}
// compactCCC converts the sparse set of CCC values to a contiguous one,
// reducing the number of bits needed from 8 to 6.
func compactCCC() {
m := make(map[uint8]uint8)
for i := range chars {
c := &chars[i]
m[c.ccc] = 0
}
cccs := []int{}
for v := range m {
cccs = append(cccs, int(v))
}
sort.Ints(cccs)
for i, c := range cccs {
cccMap[uint8(i)] = uint8(c)
m[uint8(c)] = uint8(i)
}
for i := range chars {
c := &chars[i]
c.origCCC = c.ccc
c.ccc = m[c.ccc]
}
if len(m) >= 1<<6 {
log.Fatalf("too many difference CCC values: %d >= 64", len(m))
}
}
// CompositionExclusions.txt has form:
// 0958 # ...
// See http://unicode.org/reports/tr44/ for full explanation
func loadCompositionExclusions() {
f := gen.OpenUCDFile("CompositionExclusions.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
c := &chars[p.Rune(0)]
if c.excludeInComp {
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
}
c.excludeInComp = true
}
if e := p.Err(); e != nil {
log.Fatal(e)
}
}
// hasCompatDecomp returns true if any of the recursive
// decompositions contains a compatibility expansion.
// In this case, the character may not occur in NFK*.
func hasCompatDecomp(r rune) bool {
c := &chars[r]
if c.compatDecomp {
return true
}
for _, d := range c.forms[FCompatibility].decomp {
if hasCompatDecomp(d) {
return true
}
}
return false
}
// Hangul related constants.
const (
HangulBase = 0xAC00
HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
JamoLBase = 0x1100
JamoLEnd = 0x1113
JamoVBase = 0x1161
JamoVEnd = 0x1176
JamoTBase = 0x11A8
JamoTEnd = 0x11C3
JamoLVTCount = 19 * 21 * 28
JamoTCount = 28
)
func isHangul(r rune) bool {
return HangulBase <= r && r < HangulEnd
}
func isHangulWithoutJamoT(r rune) bool {
if !isHangul(r) {
return false
}
r -= HangulBase
return r < JamoLVTCount && r%JamoTCount == 0
}
func ccc(r rune) uint8 {
return chars[r].ccc
}
// Insert a rune in a buffer, ordered by Canonical Combining Class.
func insertOrdered(b Decomposition, r rune) Decomposition {
n := len(b)
b = append(b, 0)
cc := ccc(r)
if cc > 0 {
// Shift elements to make room (simple insertion sort).
for ; n > 0; n-- {
if ccc(b[n-1]) <= cc {
break
}
b[n] = b[n-1]
}
}
b[n] = r
return b
}
// Recursively decompose.
func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
dcomp := chars[r].forms[form].decomp
if len(dcomp) == 0 {
return insertOrdered(d, r)
}
for _, c := range dcomp {
d = decomposeRecursive(form, c, d)
}
return d
}
func completeCharFields(form int) {
// Phase 0: pre-expand decomposition.
for i := range chars {
f := &chars[i].forms[form]
if len(f.decomp) == 0 {
continue
}
exp := make(Decomposition, 0)
for _, c := range f.decomp {
exp = decomposeRecursive(form, c, exp)
}
f.expandedDecomp = exp
}
// Phase 1: composition exclusion, mark decomposition.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
// Mark script-specific exclusions and version-restricted runes.
f.isOneWay = c.excludeInComp
// Singletons
f.isOneWay = f.isOneWay || len(f.decomp) == 1
// Non-starter decompositions
if len(f.decomp) > 1 {
chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
f.isOneWay = f.isOneWay || chk
}
// Runes that decompose into more than two runes.
f.isOneWay = f.isOneWay || len(f.decomp) > 2
if form == FCompatibility {
f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
}
for _, r := range f.decomp {
chars[r].forms[form].inDecomp = true
}
}
// Phase 2: forward and backward combining.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
if !f.isOneWay && len(f.decomp) == 2 {
f0 := &chars[f.decomp[0]].forms[form]
f1 := &chars[f.decomp[1]].forms[form]
if !f0.isOneWay {
f0.combinesForward = true
}
if !f1.isOneWay {
f1.combinesBackward = true
}
}
if isHangulWithoutJamoT(rune(i)) {
f.combinesForward = true
}
}
// Phase 3: quick check values.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
switch {
case len(f.decomp) > 0:
f.quickCheck[MDecomposed] = QCNo
case isHangul(rune(i)):
f.quickCheck[MDecomposed] = QCNo
default:
f.quickCheck[MDecomposed] = QCYes
}
switch {
case f.isOneWay:
f.quickCheck[MComposed] = QCNo
case (i & 0xffff00) == JamoLBase:
f.quickCheck[MComposed] = QCYes
if JamoLBase <= i && i < JamoLEnd {
f.combinesForward = true
}
if JamoVBase <= i && i < JamoVEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
f.combinesForward = true
}
if JamoTBase <= i && i < JamoTEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
}
case !f.combinesBackward:
f.quickCheck[MComposed] = QCYes
default:
f.quickCheck[MComposed] = QCMaybe
}
}
}
func computeNonStarterCounts() {
// Phase 4: leading and trailing non-starter count
for i := range chars {
c := &chars[i]
runes := []rune{rune(i)}
// We always use FCompatibility so that the CGJ insertion points do not
// change for repeated normalizations with different forms.
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
runes = exp
}
// We consider runes that combine backwards to be non-starters for the
// purpose of Stream-Safe Text Processing.
for _, r := range runes {
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nLeadingNonStarters++
}
for i := len(runes) - 1; i >= 0; i-- {
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nTrailingNonStarters++
}
if c.nTrailingNonStarters > 3 {
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
}
if isHangul(rune(i)) {
c.nTrailingNonStarters = 2
if isHangulWithoutJamoT(rune(i)) {
c.nTrailingNonStarters = 1
}
}
if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
}
if t := c.nTrailingNonStarters; t > 3 {
log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
}
}
}
func printBytes(w io.Writer, b []byte, name string) {
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
fmt.Fprintf(w, "var %s = [...]byte {", name)
for i, c := range b {
switch {
case i%64 == 0:
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
case i%8 == 0:
fmt.Fprintf(w, "\n")
}
fmt.Fprintf(w, "0x%.2X, ", c)
}
fmt.Fprint(w, "\n}\n\n")
}
// See forminfo.go for format.
func makeEntry(f *FormInfo, c *Char) uint16 {
e := uint16(0)
if r := c.codePoint; HangulBase <= r && r < HangulEnd {
e |= 0x40
}
if f.combinesForward {
e |= 0x20
}
if f.quickCheck[MDecomposed] == QCNo {
e |= 0x4
}
switch f.quickCheck[MComposed] {
case QCYes:
case QCNo:
e |= 0x10
case QCMaybe:
e |= 0x18
default:
log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
}
e |= uint16(c.nTrailingNonStarters)
return e
}
// decompSet keeps track of unique decompositions, grouped by whether
// the decomposition is followed by a trailing and/or leading CCC.
type decompSet [7]map[string]bool
const (
normalDecomp = iota
firstMulti
firstCCC
endMulti
firstLeadingCCC
firstCCCZeroExcept
firstStarterWithNLead
lastDecomp
)
var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
func makeDecompSet() decompSet {
m := decompSet{}
for i := range m {
m[i] = make(map[string]bool)
}
return m
}
func (m *decompSet) insert(key int, s string) {
m[key][s] = true
}
func printCharInfoTables(w io.Writer) int {
mkstr := func(r rune, f *FormInfo) (int, string) {
d := f.expandedDecomp
s := string([]rune(d))
if max := 1 << 6; len(s) >= max {
const msg = "%U: too many bytes in decomposition: %d >= %d"
log.Fatalf(msg, r, len(s), max)
}
head := uint8(len(s))
if f.quickCheck[MComposed] != QCYes {
head |= 0x40
}
if f.combinesForward {
head |= 0x80
}
s = string([]byte{head}) + s
lccc := ccc(d[0])
tccc := ccc(d[len(d)-1])
cc := ccc(r)
if cc != 0 && lccc == 0 && tccc == 0 {
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
}
if tccc < lccc && lccc != 0 {
const msg = "%U: lccc (%d) must be <= tcc (%d)"
log.Fatalf(msg, r, lccc, tccc)
}
index := normalDecomp
nTrail := chars[r].nTrailingNonStarters
nLead := chars[r].nLeadingNonStarters
if tccc > 0 || lccc > 0 || nTrail > 0 {
tccc <<= 2
tccc |= nTrail
s += string([]byte{tccc})
index = endMulti
for _, r := range d[1:] {
if ccc(r) == 0 {
index = firstCCC
}
}
if lccc > 0 || nLead > 0 {
s += string([]byte{lccc})
if index == firstCCC {
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
}
index = firstLeadingCCC
}
if cc != lccc {
if cc != 0 {
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
}
index = firstCCCZeroExcept
}
} else if len(d) > 1 {
index = firstMulti
}
return index, s
}
decompSet := makeDecompSet()
const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
decompSet.insert(firstStarterWithNLead, nLeadStr)
// Store the uniqued decompositions in a byte buffer,
// preceded by their byte length.
for _, c := range chars {
for _, f := range c.forms {
if len(f.expandedDecomp) == 0 {
continue
}
if f.combinesBackward {
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
}
index, s := mkstr(c.codePoint, &f)
decompSet.insert(index, s)
}
}
decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
size := 0
positionMap := make(map[string]uint16)
decompositions.WriteString("\000")
fmt.Fprintln(w, "const (")
for i, m := range decompSet {
sa := []string{}
for s := range m {
sa = append(sa, s)
}
sort.Strings(sa)
for _, s := range sa {
p := decompositions.Len()
decompositions.WriteString(s)
positionMap[s] = uint16(p)
}
if cname[i] != "" {
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
}
}
fmt.Fprintln(w, "maxDecomp = 0x8000")
fmt.Fprintln(w, ")")
b := decompositions.Bytes()
printBytes(w, b, "decomps")
size += len(b)
varnames := []string{"nfc", "nfkc"}
for i := 0; i < FNumberOfFormTypes; i++ {
trie := triegen.NewTrie(varnames[i])
for r, c := range chars {
f := c.forms[i]
d := f.expandedDecomp
if len(d) != 0 {
_, key := mkstr(c.codePoint, &f)
trie.Insert(rune(r), uint64(positionMap[key]))
if c.ccc != ccc(d[0]) {
// We assume the lead ccc of a decomposition !=0 in this case.
if ccc(d[0]) == 0 {
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
}
}
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
// Handle cases where it can't be detected that the nLead should be equal
// to nTrail.
trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
trie.Insert(c.codePoint, uint64(0x8000|v))
}
}
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
if err != nil {
log.Fatal(err)
}
size += sz
}
return size
}
func contains(sa []string, s string) bool {
for _, a := range sa {
if a == s {
return true
}
}
return false
}
func makeTables() {
w := &bytes.Buffer{}
size := 0
if *tablelist == "" {
return
}
list := strings.Split(*tablelist, ",")
if *tablelist == "all" {
list = []string{"recomp", "info"}
}
// Compute maximum decomposition size.
max := 0
for _, c := range chars {
if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
max = n
}
}
fmt.Fprintln(w, "const (")
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
fmt.Fprintln(w)
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
fmt.Fprintln(w, ")\n")
// Print the CCC remap table.
size += len(cccMap)
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
for i := 0; i < len(cccMap); i++ {
if i%8 == 0 {
fmt.Fprintln(w)
}
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
}
fmt.Fprintln(w, "\n}\n")
if contains(list, "info") {
size += printCharInfoTables(w)
}
if contains(list, "recomp") {
// Note that we use 32 bit keys, instead of 64 bit.
// This clips the bits of three entries, but we know
// this won't cause a collision. The compiler will catch
// any changes made to UnicodeData.txt that introduces
// a collision.
// Note that the recomposition map for NFC and NFKC
// are identical.
// Recomposition map
nrentries := 0
for _, c := range chars {
f := c.forms[FCanonical]
if !f.isOneWay && len(f.decomp) > 0 {
nrentries++
}
}
sz := nrentries * 8
size += sz
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
fmt.Fprintln(w, "var recompMap = map[uint32]rune{")
for i, c := range chars {
f := c.forms[FCanonical]
d := f.decomp
if !f.isOneWay && len(d) > 0 {
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i)
}
}
fmt.Fprintf(w, "}\n\n")
}
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
gen.WriteGoFile("tables.go", "norm", w.Bytes())
}
func printChars() {
if *verbose {
for _, c := range chars {
if !c.isValid() || c.state == SMissing {
continue
}
fmt.Println(c)
}
}
}
// verifyComputed does various consistency tests.
func verifyComputed() {
for i, c := range chars {
for _, f := range c.forms {
isNo := (f.quickCheck[MDecomposed] == QCNo)
if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
}
isMaybe := f.quickCheck[MComposed] == QCMaybe
if f.combinesBackward != isMaybe {
log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
}
if len(f.decomp) > 0 && f.combinesForward && isMaybe {
log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
}
if len(f.expandedDecomp) != 0 {
continue
}
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
// We accept that these runes are treated differently (it only affects
// segment breaking in iteration, most likely on improper use), but
// reconsider if more characters are added.
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
}
}
}
nfc := c.forms[FCanonical]
nfkc := c.forms[FCompatibility]
if nfc.combinesBackward != nfkc.combinesBackward {
log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
}
}
}
// Use values in DerivedNormalizationProps.txt to compare against the
// values we computed.
// DerivedNormalizationProps.txt has form:
// 00C0..00C5 ; NFD_QC; N # ...
// 0374 ; NFD_QC; N # ...
// See http://unicode.org/reports/tr44/ for full explanation
func testDerived() {
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(0)
c := &chars[r]
var ftype, mode int
qt := p.String(1)
switch qt {
case "NFC_QC":
ftype, mode = FCanonical, MComposed
case "NFD_QC":
ftype, mode = FCanonical, MDecomposed
case "NFKC_QC":
ftype, mode = FCompatibility, MComposed
case "NFKD_QC":
ftype, mode = FCompatibility, MDecomposed
default:
continue
}
var qr QCResult
switch p.String(2) {
case "Y":
qr = QCYes
case "N":
qr = QCNo
case "M":
qr = QCMaybe
default:
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
}
if got := c.forms[ftype].quickCheck[mode]; got != qr {
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
}
c.forms[ftype].verified[mode] = true
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
// Any unspecified value must be QCYes. Verify this.
for i, c := range chars {
for j, fd := range c.forms {
for k, qr := range fd.quickCheck {
if !fd.verified[k] && qr != QCYes {
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
log.Printf(m, i, j, k, qr, c.name)
}
}
}
}
}
var testHeader = `const (
Yes = iota
No
Maybe
)
type formData struct {
qc uint8
combinesForward bool
decomposition string
}
type runeData struct {
r rune
ccc uint8
nLead uint8
nTrail uint8
f [2]formData // 0: canonical; 1: compatibility
}
func f(qc uint8, cf bool, dec string) [2]formData {
return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
}
func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
}
var testData = []runeData{
`
func printTestdata() {
type lastInfo struct {
ccc uint8
nLead uint8
nTrail uint8
f string
}
last := lastInfo{}
w := &bytes.Buffer{}
fmt.Fprintf(w, testHeader)
for r, c := range chars {
f := c.forms[FCanonical]
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
f = c.forms[FCompatibility]
qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
s := ""
if d == dk && qc == qck && cf == cfk {
s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
} else {
s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
}
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
if last != current {
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
last = current
}
}
fmt.Fprintln(w, "}")
gen.WriteGoFile("data_test.go", "norm", w.Bytes())
}

609
vendor/golang.org/x/text/unicode/norm/normalize.go generated vendored Normal file
View file

@ -0,0 +1,609 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Note: the file data_test.go that is generated should not be checked in.
//go:generate go run maketables.go triegen.go
//go:generate go test -tags test
// Package norm contains types and functions for normalizing Unicode strings.
package norm // import "golang.org/x/text/unicode/norm"
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// A Form denotes a canonical representation of Unicode code points.
// The Unicode-defined normalization and equivalence forms are:
//
// NFC Unicode Normalization Form C
// NFD Unicode Normalization Form D
// NFKC Unicode Normalization Form KC
// NFKD Unicode Normalization Form KD
//
// For a Form f, this documentation uses the notation f(x) to mean
// the bytes or string x converted to the given form.
// A position n in x is called a boundary if conversion to the form can
// proceed independently on both sides:
// f(x) == append(f(x[0:n]), f(x[n:])...)
//
// References: http://unicode.org/reports/tr15/ and
// http://unicode.org/notes/tn5/.
type Form int
const (
NFC Form = iota
NFD
NFKC
NFKD
)
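To make the boundary property above concrete, here is a minimal sketch (not part of the diff; it assumes the package's public import path golang.org/x/text/unicode/norm):
package main
import (
	"fmt"
	"golang.org/x/text/unicode/norm"
)
func main() {
	composed := "\u00e9"    // "é" as a single code point (NFC)
	decomposed := "e\u0301" // 'e' followed by U+0301 COMBINING ACUTE ACCENT (NFD)
	fmt.Println(norm.NFC.String(decomposed) == composed) // true
	fmt.Println(norm.NFD.String(composed) == decomposed) // true
	fmt.Println(norm.NFC.IsNormalString(decomposed))     // false
}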
// Bytes returns f(b). May return b if f(b) = b.
func (f Form) Bytes(b []byte) []byte {
src := inputBytes(b)
ft := formTable[f]
n, ok := ft.quickSpan(src, 0, len(b), true)
if ok {
return b
}
out := make([]byte, n, len(b))
copy(out, b[0:n])
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush}
return doAppendInner(&rb, n)
}
// String returns f(s).
func (f Form) String(s string) string {
src := inputString(s)
ft := formTable[f]
n, ok := ft.quickSpan(src, 0, len(s), true)
if ok {
return s
}
out := make([]byte, n, len(s))
copy(out, s[0:n])
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush}
return string(doAppendInner(&rb, n))
}
// IsNormal returns true if b == f(b).
func (f Form) IsNormal(b []byte) bool {
src := inputBytes(b)
ft := formTable[f]
bp, ok := ft.quickSpan(src, 0, len(b), true)
if ok {
return true
}
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)}
rb.setFlusher(nil, cmpNormalBytes)
for bp < len(b) {
rb.out = b[bp:]
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
return false
}
bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true)
}
return true
}
func cmpNormalBytes(rb *reorderBuffer) bool {
b := rb.out
for i := 0; i < rb.nrune; i++ {
info := rb.rune[i]
if int(info.size) > len(b) {
return false
}
p := info.pos
pe := p + info.size
for ; p < pe; p++ {
if b[0] != rb.byte[p] {
return false
}
b = b[1:]
}
}
return true
}
// IsNormalString returns true if s == f(s).
func (f Form) IsNormalString(s string) bool {
src := inputString(s)
ft := formTable[f]
bp, ok := ft.quickSpan(src, 0, len(s), true)
if ok {
return true
}
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)}
rb.setFlusher(nil, func(rb *reorderBuffer) bool {
for i := 0; i < rb.nrune; i++ {
info := rb.rune[i]
if bp+int(info.size) > len(s) {
return false
}
p := info.pos
pe := p + info.size
for ; p < pe; p++ {
if s[bp] != rb.byte[p] {
return false
}
bp++
}
}
return true
})
for bp < len(s) {
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
return false
}
bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true)
}
return true
}
// patchTail fixes a case where a rune may be incorrectly normalized
// if it is followed by illegal continuation bytes. It returns the
// patched buffer and whether the decomposition is still in progress.
func patchTail(rb *reorderBuffer) bool {
info, p := lastRuneStart(&rb.f, rb.out)
if p == -1 || info.size == 0 {
return true
}
end := p + int(info.size)
extra := len(rb.out) - end
if extra > 0 {
// Potentially allocating memory. However, this only
// happens with ill-formed UTF-8.
x := make([]byte, 0)
x = append(x, rb.out[len(rb.out)-extra:]...)
rb.out = rb.out[:end]
decomposeToLastBoundary(rb)
rb.doFlush()
rb.out = append(rb.out, x...)
return false
}
buf := rb.out[p:]
rb.out = rb.out[:p]
decomposeToLastBoundary(rb)
if s := rb.ss.next(info); s == ssStarter {
rb.doFlush()
rb.ss.first(info)
} else if s == ssOverflow {
rb.doFlush()
rb.insertCGJ()
rb.ss = 0
}
rb.insertUnsafe(inputBytes(buf), 0, info)
return true
}
func appendQuick(rb *reorderBuffer, i int) int {
if rb.nsrc == i {
return i
}
end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true)
rb.out = rb.src.appendSlice(rb.out, i, end)
return end
}
// Append returns f(append(out, b...)).
// The buffer out must be nil, empty, or equal to f(out).
func (f Form) Append(out []byte, src ...byte) []byte {
return f.doAppend(out, inputBytes(src), len(src))
}
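A small usage sketch for Append: because out must already be in form f, the call can recombine a trailing starter in out with newly arriving combining marks (the byte values are a hypothetical example, not from the diff):
package main
import (
	"fmt"
	"golang.org/x/text/unicode/norm"
)
func main() {
	out := norm.NFC.Bytes([]byte("e")) // out == f(out), as Append requires
	// 0xCC 0x81 is the UTF-8 encoding of U+0301; it recombines with 'e'.
	out = norm.NFC.Append(out, 0xcc, 0x81)
	fmt.Printf("%q\n", out) // "é"
}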
func (f Form) doAppend(out []byte, src input, n int) []byte {
if n == 0 {
return out
}
ft := formTable[f]
// Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer.
if len(out) == 0 {
p, _ := ft.quickSpan(src, 0, n, true)
out = src.appendSlice(out, 0, p)
if p == n {
return out
}
rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush}
return doAppendInner(&rb, p)
}
rb := reorderBuffer{f: *ft, src: src, nsrc: n}
return doAppend(&rb, out, 0)
}
func doAppend(rb *reorderBuffer, out []byte, p int) []byte {
rb.setFlusher(out, appendFlush)
src, n := rb.src, rb.nsrc
doMerge := len(out) > 0
if q := src.skipContinuationBytes(p); q > p {
// Move leading non-starters to destination.
rb.out = src.appendSlice(rb.out, p, q)
p = q
doMerge = patchTail(rb)
}
fd := &rb.f
if doMerge {
var info Properties
if p < n {
info = fd.info(src, p)
if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 {
if p == 0 {
decomposeToLastBoundary(rb)
}
p = decomposeSegment(rb, p, true)
}
}
if info.size == 0 {
rb.doFlush()
// Append incomplete UTF-8 encoding.
return src.appendSlice(rb.out, p, n)
}
if rb.nrune > 0 {
return doAppendInner(rb, p)
}
}
p = appendQuick(rb, p)
return doAppendInner(rb, p)
}
func doAppendInner(rb *reorderBuffer, p int) []byte {
for n := rb.nsrc; p < n; {
p = decomposeSegment(rb, p, true)
p = appendQuick(rb, p)
}
return rb.out
}
// AppendString returns f(append(out, []byte(s))).
// The buffer out must be nil, empty, or equal to f(out).
func (f Form) AppendString(out []byte, src string) []byte {
return f.doAppend(out, inputString(src), len(src))
}
// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) QuickSpan(b []byte) int {
n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true)
return n
}
// Span implements transform.SpanningTransformer. It returns a boundary n such
// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
if n < len(b) {
if !ok {
err = transform.ErrEndOfSpan
} else {
err = transform.ErrShortSrc
}
}
return n, err
}
// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
if n < len(s) {
if !ok {
err = transform.ErrEndOfSpan
} else {
err = transform.ErrShortSrc
}
}
return n, err
}
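A hedged sketch of how a caller distinguishes the two Span errors, assuming the transform package imported above:
package main
import (
	"fmt"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)
func main() {
	n, err := norm.NFC.Span([]byte("e\u0301x"), true)
	switch err {
	case transform.ErrEndOfSpan:
		fmt.Println("input stops being NFC at byte", n) // the e+U+0301 pair is not composed
	case transform.ErrShortSrc:
		fmt.Println("need more bytes to decide beyond", n)
	case nil:
		fmt.Println("entire input is already NFC")
	}
}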
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
// whether any non-normalized parts were found. If atEOF is false, n will
// not point past the last segment if this segment might become
// non-normalized by appending other runes.
func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
var lastCC uint8
ss := streamSafe(0)
lastSegStart := i
for n = end; i < n; {
if j := src.skipASCII(i, n); i != j {
i = j
lastSegStart = i - 1
lastCC = 0
ss = 0
continue
}
info := f.info(src, i)
if info.size == 0 {
if atEOF {
// include incomplete runes
return n, true
}
return lastSegStart, true
}
// This block needs to be before the next, because it is possible to
// have an overflow for runes that are starters (e.g. with U+FF9E).
switch ss.next(info) {
case ssStarter:
ss.first(info)
lastSegStart = i
case ssOverflow:
return lastSegStart, false
case ssSuccess:
if lastCC > info.ccc {
return lastSegStart, false
}
}
if f.composing {
if !info.isYesC() {
break
}
} else {
if !info.isYesD() {
break
}
}
lastCC = info.ccc
i += int(info.size)
}
if i == n {
if !atEOF {
n = lastSegStart
}
return n, true
}
return lastSegStart, false
}
// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) QuickSpanString(s string) int {
n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
return n
}
// FirstBoundary returns the position i of the first boundary in b
// or -1 if b contains no boundary.
func (f Form) FirstBoundary(b []byte) int {
return f.firstBoundary(inputBytes(b), len(b))
}
func (f Form) firstBoundary(src input, nsrc int) int {
i := src.skipContinuationBytes(0)
if i >= nsrc {
return -1
}
fd := formTable[f]
ss := streamSafe(0)
// We should call ss.first here, but we can't as the first rune is
// skipped already. This means FirstBoundary can't really determine
// CGJ insertion points correctly. Luckily it doesn't have to.
for {
info := fd.info(src, i)
if info.size == 0 {
return -1
}
if s := ss.next(info); s != ssSuccess {
return i
}
i += int(info.size)
if i >= nsrc {
if !info.BoundaryAfter() && !ss.isMax() {
return -1
}
return nsrc
}
}
}
// FirstBoundaryInString returns the position i of the first boundary in s
// or -1 if s contains no boundary.
func (f Form) FirstBoundaryInString(s string) int {
return f.firstBoundary(inputString(s), len(s))
}
// NextBoundary reports the index of the boundary between the first and next
// segment in b or -1 if atEOF is false and there are not enough bytes to
// determine this boundary.
func (f Form) NextBoundary(b []byte, atEOF bool) int {
return f.nextBoundary(inputBytes(b), len(b), atEOF)
}
// NextBoundaryInString reports the index of the boundary between the first and
// next segment in s or -1 if atEOF is false and there are not enough bytes to
// determine this boundary.
func (f Form) NextBoundaryInString(s string, atEOF bool) int {
return f.nextBoundary(inputString(s), len(s), atEOF)
}
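For illustration, segment iteration with NextBoundary might look like this (a sketch, not code from the diff):
package main
import (
	"fmt"
	"golang.org/x/text/unicode/norm"
)
func main() {
	b := []byte("e\u0301a\u030a") // two combining sequences
	for len(b) > 0 {
		n := norm.NFC.NextBoundary(b, true) // atEOF=true: no more input follows
		fmt.Printf("segment %q\n", b[:n])
		b = b[n:]
	}
}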
func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
if nsrc == 0 {
if atEOF {
return 0
}
return -1
}
fd := formTable[f]
info := fd.info(src, 0)
if info.size == 0 {
if atEOF {
return 1
}
return -1
}
ss := streamSafe(0)
ss.first(info)
for i := int(info.size); i < nsrc; i += int(info.size) {
info = fd.info(src, i)
if info.size == 0 {
if atEOF {
return i
}
return -1
}
if s := ss.next(info); s != ssSuccess {
return i
}
}
if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
return -1
}
return nsrc
}
// LastBoundary returns the position i of the last boundary in b
// or -1 if b contains no boundary.
func (f Form) LastBoundary(b []byte) int {
return lastBoundary(formTable[f], b)
}
func lastBoundary(fd *formInfo, b []byte) int {
i := len(b)
info, p := lastRuneStart(fd, b)
if p == -1 {
return -1
}
if info.size == 0 { // ends with incomplete rune
if p == 0 { // starts with incomplete rune
return -1
}
i = p
info, p = lastRuneStart(fd, b[:i])
if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter
return i
}
}
if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
return i
}
if info.BoundaryAfter() {
return i
}
ss := streamSafe(0)
v := ss.backwards(info)
for i = p; i >= 0 && v != ssStarter; i = p {
info, p = lastRuneStart(fd, b[:i])
if v = ss.backwards(info); v == ssOverflow {
break
}
if p+int(info.size) != i {
if p == -1 { // no boundary found
return -1
}
return i // boundary after an illegal UTF-8 encoding
}
}
return i
}
// decomposeSegment scans the first segment in src into rb. It inserts 0x034f
// (Combining Grapheme Joiner) when it encounters a sequence of more than 30 non-starters
// and returns the number of bytes consumed from src or iShortDst or iShortSrc.
func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
// Force one character to be consumed.
info := rb.f.info(rb.src, sp)
if info.size == 0 {
return 0
}
if rb.nrune > 0 {
if s := rb.ss.next(info); s == ssStarter {
goto end
} else if s == ssOverflow {
rb.insertCGJ()
goto end
}
} else {
rb.ss.first(info)
}
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
return int(err)
}
for {
sp += int(info.size)
if sp >= rb.nsrc {
if !atEOF && !info.BoundaryAfter() {
return int(iShortSrc)
}
break
}
info = rb.f.info(rb.src, sp)
if info.size == 0 {
if !atEOF {
return int(iShortSrc)
}
break
}
if s := rb.ss.next(info); s == ssStarter {
break
} else if s == ssOverflow {
rb.insertCGJ()
break
}
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
return int(err)
}
}
end:
if !rb.doFlush() {
return int(iShortDst)
}
return sp
}
// lastRuneStart returns the runeInfo and position of the last
// rune in buf or the zero runeInfo and -1 if no rune was found.
func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) {
p := len(buf) - 1
for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
}
if p < 0 {
return Properties{}, -1
}
return fd.info(inputBytes(buf), p), p
}
// decomposeToLastBoundary finds an open segment at the end of the buffer
// and scans it into rb. Returns the buffer minus the last segment.
func decomposeToLastBoundary(rb *reorderBuffer) {
fd := &rb.f
info, i := lastRuneStart(fd, rb.out)
if int(info.size) != len(rb.out)-i {
// illegal trailing continuation bytes
return
}
if info.BoundaryAfter() {
return
}
var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
padd := 0
ss := streamSafe(0)
p := len(rb.out)
for {
add[padd] = info
v := ss.backwards(info)
if v == ssOverflow {
// Note that if we have an overflow, it means the string we are appending to
// is not correctly normalized. In this case the behavior is undefined.
break
}
padd++
p -= int(info.size)
if v == ssStarter || p < 0 {
break
}
info, i = lastRuneStart(fd, rb.out[:p])
if int(info.size) != p-i {
break
}
}
rb.ss = ss
// Copy bytes for insertion as we may need to overwrite rb.out.
var buf [maxBufferSize * utf8.UTFMax]byte
cp := buf[:copy(buf[:], rb.out[p:])]
rb.out = rb.out[:p]
for padd--; padd >= 0; padd-- {
info = add[padd]
rb.insertUnsafe(inputBytes(cp), 0, info)
cp = cp[info.size:]
}
}

125
vendor/golang.org/x/text/unicode/norm/readwriter.go generated vendored Normal file
View file

@ -0,0 +1,125 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "io"
type normWriter struct {
rb reorderBuffer
w io.Writer
buf []byte
}
// Write implements the standard write interface. If the last characters are
// not at a normalization boundary, the bytes will be buffered for the next
// write. The remaining bytes will be written on close.
func (w *normWriter) Write(data []byte) (n int, err error) {
// Process data in pieces to keep w.buf size bounded.
const chunk = 4000
for len(data) > 0 {
// Normalize into w.buf.
m := len(data)
if m > chunk {
m = chunk
}
w.rb.src = inputBytes(data[:m])
w.rb.nsrc = m
w.buf = doAppend(&w.rb, w.buf, 0)
data = data[m:]
n += m
// Write out complete prefix, save remainder.
// Note that lastBoundary looks back at most 31 runes.
i := lastBoundary(&w.rb.f, w.buf)
if i == -1 {
i = 0
}
if i > 0 {
if _, err = w.w.Write(w.buf[:i]); err != nil {
break
}
bn := copy(w.buf, w.buf[i:])
w.buf = w.buf[:bn]
}
}
return n, err
}
// Close forces data that remains in the buffer to be written.
func (w *normWriter) Close() error {
if len(w.buf) > 0 {
_, err := w.w.Write(w.buf)
if err != nil {
return err
}
}
return nil
}
// Writer returns a new writer that implements Write(b)
// by writing f(b) to w. The returned writer may use an
// internal buffer to maintain state across Write calls.
// Calling its Close method writes any buffered data to w.
func (f Form) Writer(w io.Writer) io.WriteCloser {
wr := &normWriter{rb: reorderBuffer{}, w: w}
wr.rb.init(f, nil)
return wr
}
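A usage sketch for the writer (error handling elided for brevity; the buffering behavior above is why Close must be called):
package main
import (
	"bytes"
	"fmt"
	"golang.org/x/text/unicode/norm"
)
func main() {
	var buf bytes.Buffer
	w := norm.NFC.Writer(&buf)
	w.Write([]byte("e"))      // buffered: a combining mark may still follow
	w.Write([]byte("\u0301")) // combines with the buffered 'e'
	w.Close()                 // flushes the final, possibly incomplete segment
	fmt.Printf("%q\n", buf.String()) // "é"
}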
type normReader struct {
rb reorderBuffer
r io.Reader
inbuf []byte
outbuf []byte
bufStart int
lastBoundary int
err error
}
// Read implements the standard read interface.
func (r *normReader) Read(p []byte) (int, error) {
for {
if r.lastBoundary-r.bufStart > 0 {
n := copy(p, r.outbuf[r.bufStart:r.lastBoundary])
r.bufStart += n
if r.lastBoundary-r.bufStart > 0 {
return n, nil
}
return n, r.err
}
if r.err != nil {
return 0, r.err
}
outn := copy(r.outbuf, r.outbuf[r.lastBoundary:])
r.outbuf = r.outbuf[0:outn]
r.bufStart = 0
n, err := r.r.Read(r.inbuf)
r.rb.src = inputBytes(r.inbuf[0:n])
r.rb.nsrc, r.err = n, err
if n > 0 {
r.outbuf = doAppend(&r.rb, r.outbuf, 0)
}
if err == io.EOF {
r.lastBoundary = len(r.outbuf)
} else {
r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf)
if r.lastBoundary == -1 {
r.lastBoundary = 0
}
}
}
}
// Reader returns a new reader that implements Read
// by reading data from r and returning f(data).
func (f Form) Reader(r io.Reader) io.Reader {
const chunk = 4000
buf := make([]byte, chunk)
rr := &normReader{rb: reorderBuffer{}, r: r, inbuf: buf}
rr.rb.init(f, buf)
return rr
}
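And the matching reader side, as a sketch:
package main
import (
	"fmt"
	"io/ioutil"
	"strings"
	"golang.org/x/text/unicode/norm"
)
func main() {
	r := norm.NFC.Reader(strings.NewReader("e\u0301"))
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out) // "é"
}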

7631
vendor/golang.org/x/text/unicode/norm/tables.go generated vendored Normal file

File diff suppressed because it is too large

88
vendor/golang.org/x/text/unicode/norm/transform.go generated vendored Normal file
View file

@ -0,0 +1,88 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// Reset implements the Reset method of the transform.Transformer interface.
func (Form) Reset() {}
// Transform implements the Transform method of the transform.Transformer
// interface. It may need to write segments of up to MaxSegmentSize at once.
// Users should either catch ErrShortDst and allow dst to grow or have dst be at
// least of size MaxTransformChunkSize to be guaranteed of progress.
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := 0
// Cap the maximum number of src bytes to check.
b := src
eof := atEOF
if ns := len(dst); ns < len(b) {
err = transform.ErrShortDst
eof = false
b = b[:ns]
}
i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof)
n += copy(dst[n:], b[n:i])
if !ok {
nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF)
return nDst + n, nSrc + n, err
}
if n < len(src) && !atEOF {
err = transform.ErrShortSrc
}
return n, n, err
}
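Because Form satisfies transform.Transformer, most callers can let the transform package manage dst growth rather than sizing buffers by MaxTransformChunkSize themselves; a sketch:
package main
import (
	"fmt"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)
func main() {
	// transform.String re-allocates dst internally on ErrShortDst.
	s, _, err := transform.String(norm.NFD, "\u00e9")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", s) // "e\u0301"
}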
func flushTransform(rb *reorderBuffer) bool {
// Write out (must fully fit in dst, or else it is an ErrShortDst).
if len(rb.out) < rb.nrune*utf8.UTFMax {
return false
}
rb.out = rb.out[rb.flushCopy(rb.out):]
return true
}
var errs = []error{nil, transform.ErrShortDst, transform.ErrShortSrc}
// transform implements the transform.Transformer interface. It is only called
// when quickSpan does not pass for a given string.
func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// TODO: get rid of reorderBuffer. See CL 23460044.
rb := reorderBuffer{}
rb.init(f, src)
for {
// Load segment into reorder buffer.
rb.setFlusher(dst[nDst:], flushTransform)
end := decomposeSegment(&rb, nSrc, atEOF)
if end < 0 {
return nDst, nSrc, errs[-end]
}
nDst = len(dst) - len(rb.out)
nSrc = end
// Next quickSpan.
end = rb.nsrc
eof := atEOF
if n := nSrc + len(dst) - nDst; n < end {
err = transform.ErrShortDst
end = n
eof = false
}
end, ok := rb.f.quickSpan(rb.src, nSrc, end, eof)
n := copy(dst[nDst:], rb.src.bytes[nSrc:end])
nSrc += n
nDst += n
if ok {
if n < rb.nsrc && !atEOF {
err = transform.ErrShortSrc
}
return nDst, nSrc, err
}
}
}

54
vendor/golang.org/x/text/unicode/norm/trie.go generated vendored Normal file
View file

@ -0,0 +1,54 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
type valueRange struct {
value uint16 // header: value:stride
lo, hi byte // header: lo:n
}
type sparseBlocks struct {
values []valueRange
offset []uint16
}
var nfcSparse = sparseBlocks{
values: nfcSparseValues[:],
offset: nfcSparseOffset[:],
}
var nfkcSparse = sparseBlocks{
values: nfkcSparseValues[:],
offset: nfkcSparseOffset[:],
}
var (
nfcData = newNfcTrie(0)
nfkcData = newNfkcTrie(0)
)
// lookupValue determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is given by r.value + (b - r.lo) * stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
offset := t.offset[n]
header := t.values[offset]
lo := offset + 1
hi := lo + uint16(header.lo)
for lo < hi {
m := lo + (hi-lo)/2
r := t.values[m]
if r.lo <= b && b <= r.hi {
return r.value + uint16(b-r.lo)*header.value
}
if b < r.lo {
hi = m
} else {
lo = m + 1
}
}
return 0
}
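To make the range arithmetic concrete, here is a standalone restatement of the lookup with hypothetical table contents (the real values live in the generated tables.go, whose diff is suppressed above):
package main
import "fmt"
type valueRange struct {
	value  uint16 // header: stride; entries: base value
	lo, hi byte   // header: lo holds the number of ranges
}
// lookup mirrors sparseBlocks.lookup: a binary search over the ranges of
// block n, returning r.value + (b-r.lo)*stride for a matching range r.
func lookup(values []valueRange, offset []uint16, n uint32, b byte) uint16 {
	off := offset[n]
	header := values[off]
	lo, hi := off+1, off+1+uint16(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := values[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}
func main() {
	// One block, one range [0x80,0x8f] with base value 100 and stride 2.
	values := []valueRange{{value: 2, lo: 1}, {value: 100, lo: 0x80, hi: 0x8f}}
	offset := []uint16{0}
	fmt.Println(lookup(values, offset, 0, 0x83)) // 100 + 3*2 = 106
}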

117
vendor/golang.org/x/text/unicode/norm/triegen.go generated vendored Normal file
View file

@ -0,0 +1,117 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Trie table generator.
// Used by make*tables tools to generate a go file with trie data structures
// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
// sequence are used to look up offsets in the index table to be used for the
// next byte. The last byte is used to index into a table with 16-bit values.
package main
import (
"fmt"
"io"
)
const maxSparseEntries = 16
type normCompacter struct {
sparseBlocks [][]uint64
sparseOffset []uint16
sparseCount int
name string
}
func mostFrequentStride(a []uint64) int {
counts := make(map[int]int)
var v int
for _, x := range a {
if stride := int(x) - v; v != 0 && stride >= 0 {
counts[stride]++
}
v = int(x)
}
var maxs, maxc int
for stride, cnt := range counts {
if cnt > maxc || (cnt == maxc && stride < maxs) {
maxs, maxc = stride, cnt
}
}
return maxs
}
func countSparseEntries(a []uint64) int {
stride := mostFrequentStride(a)
var v, count int
for _, tv := range a {
if int(tv)-v != stride {
if tv != 0 {
count++
}
}
v = int(tv)
}
return count
}
func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
if n := countSparseEntries(v); n <= maxSparseEntries {
return (n+1)*4 + 2, true
}
return 0, false
}
func (c *normCompacter) Store(v []uint64) uint32 {
h := uint32(len(c.sparseOffset))
c.sparseBlocks = append(c.sparseBlocks, v)
c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
c.sparseCount += countSparseEntries(v) + 1
return h
}
func (c *normCompacter) Handler() string {
return c.name + "Sparse.lookup"
}
func (c *normCompacter) Print(w io.Writer) (retErr error) {
p := func(f string, x ...interface{}) {
if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
retErr = err
}
}
ls := len(c.sparseBlocks)
p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
ns := c.sparseCount
p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
p("var %sSparseValues = [%d]valueRange {", c.name, ns)
for i, b := range c.sparseBlocks {
p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
var v int
stride := mostFrequentStride(b)
n := countSparseEntries(b)
p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
for i, nv := range b {
if int(nv)-v != stride {
if v != 0 {
p(",hi:%#02x},", 0x80+i-1)
}
if nv != 0 {
p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
}
}
v = int(nv)
}
if v != 0 {
p(",hi:%#02x},", 0x80+len(b)-1)
}
}
p("\n}\n\n")
return
}

202
vendor/google.golang.org/genproto/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,144 @@
// Code generated by protoc-gen-go.
// source: google/rpc/status.proto
// DO NOT EDIT!
/*
Package status is a generated protocol buffer package.
It is generated from these files:
google/rpc/status.proto
It has these top-level messages:
Status
*/
package status
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs. It is used by
// [gRPC](https://github.com/grpc). The error model is designed to be:
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error message,
// and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
// error message should be a developer-facing English message that helps
// developers *understand* and *resolve* the error. If a localized user-facing
// error message is needed, put the localized message in the error details or
// localize it in the client. The optional error details may contain arbitrary
// information about the error. There is a predefined set of error detail types
// in the package `google.rpc` which can be used for common error conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting purpose.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
type Status struct {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
// A list of messages that carry the error details. There will be a
// common set of message types for APIs to use.
Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Status) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Status) GetDetails() []*google_protobuf.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
0x00,
}
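A brief sketch of constructing and marshaling this generated type (assuming the conventional google.golang.org/genproto/googleapis/rpc/status import path for the vendored package):
package main
import (
	"fmt"
	"github.com/golang/protobuf/proto"
	"google.golang.org/genproto/googleapis/rpc/status"
)
func main() {
	st := &status.Status{
		Code:    5, // google.rpc.Code value for NOT_FOUND
		Message: "key not found",
	}
	b, err := proto.Marshal(st)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d wire bytes; message: %q\n", len(b), st.GetMessage())
}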

123
vendor/google.golang.org/genproto/regen.go generated vendored Normal file
View file

@ -0,0 +1,123 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
// Regen.go regenerates the genproto repository.
//
// Regen.go recursively walks through each directory named by given arguments,
// looking for all .proto files. (Symlinks are not followed.)
// If the pkg_prefix flag is not an empty string,
// any proto file without a `go_package` option
// or whose option does not begin with the prefix is ignored.
// Protoc is executed on remaining files,
// one invocation per set of files declaring the same Go package.
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
)
var goPkgOptRe = regexp.MustCompile(`(?m)^option go_package = (.*);`)
func usage() {
fmt.Fprintln(os.Stderr, `usage: go run regen.go -go_out=path/to/output [-pkg_prefix=pkg/prefix] roots...
Most users will not need to run this file directly.
To regenerate this repository, run regen.sh instead.`)
flag.PrintDefaults()
}
func main() {
goOutDir := flag.String("go_out", "", "go_out argument to pass to protoc-gen-go")
pkgPrefix := flag.String("pkg_prefix", "", "only include proto files with go_package starting with this prefix")
flag.Usage = usage
flag.Parse()
if *goOutDir == "" {
log.Fatal("need go_out flag")
}
pkgFiles := make(map[string][]string)
walkFn := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.Mode().IsRegular() || !strings.HasSuffix(path, ".proto") {
return nil
}
pkg, err := goPkg(path)
if err != nil {
return err
}
pkgFiles[pkg] = append(pkgFiles[pkg], path)
return nil
}
for _, root := range flag.Args() {
if err := filepath.Walk(root, walkFn); err != nil {
log.Fatal(err)
}
}
for pkg, fnames := range pkgFiles {
if !strings.HasPrefix(pkg, *pkgPrefix) {
continue
}
if out, err := protoc(*goOutDir, flag.Args(), fnames); err != nil {
log.Fatalf("error executing protoc: %s\n%s", err, out)
}
}
}
// goPkg reports the import path declared in the given file's
// `go_package` option. If the option is missing, goPkg returns an empty string.
func goPkg(fname string) (string, error) {
content, err := ioutil.ReadFile(fname)
if err != nil {
return "", err
}
var pkgName string
if match := goPkgOptRe.FindSubmatch(content); len(match) > 0 {
pn, err := strconv.Unquote(string(match[1]))
if err != nil {
return "", err
}
pkgName = pn
}
if p := strings.IndexRune(pkgName, ';'); p > 0 {
pkgName = pkgName[:p]
}
return pkgName, nil
}
// protoc executes the "protoc" command on files named in fnames,
// passing go_out and include flags specified in goOut and includes respectively.
// protoc returns combined output from stdout and stderr.
func protoc(goOut string, includes, fnames []string) ([]byte, error) {
args := []string{"--go_out=plugins=grpc:" + goOut}
for _, inc := range includes {
args = append(args, "-I", inc)
}
args = append(args, fnames...)
return exec.Command("protoc", args...).CombinedOutput()
}

View file

@ -58,7 +58,7 @@ func setDefaults(bc *BackoffConfig) {
} }
} }
func (bc BackoffConfig) backoff(retries int) (t time.Duration) { func (bc BackoffConfig) backoff(retries int) time.Duration {
if retries == 0 { if retries == 0 {
return bc.baseDelay return bc.baseDelay
} }

View file

@ -38,6 +38,7 @@ import (
"sync" "sync"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/naming" "google.golang.org/grpc/naming"
@ -315,7 +316,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
if !opts.BlockingWait { if !opts.BlockingWait {
if len(rr.addrs) == 0 { if len(rr.addrs) == 0 {
rr.mu.Unlock() rr.mu.Unlock()
err = fmt.Errorf("there is no address available") err = Errorf(codes.Unavailable, "there is no address available")
return return
} }
// Returns the next addr on rr.addrs for failfast RPCs. // Returns the next addr on rr.addrs for failfast RPCs.

View file

@ -36,12 +36,14 @@ package grpc
import ( import (
"bytes" "bytes"
"io" "io"
"math"
"time" "time"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
@ -49,9 +51,9 @@ import (
// On error, it returns the error and indicates whether the call should be retried. // On error, it returns the error and indicates whether the call should be retried.
// //
// TODO(zhaoq): Check whether the received message sequence is valid. // TODO(zhaoq): Check whether the received message sequence is valid.
func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // TODO ctx is used for stats collection and processing. It is the context passed from the application.
func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
// Try to acquire header metadata from the server if there is any. // Try to acquire header metadata from the server if there is any.
var err error
defer func() { defer func() {
if err != nil { if err != nil {
if _, ok := err.(transport.ConnectionError); !ok { if _, ok := err.(transport.ConnectionError); !ok {
@ -61,23 +63,37 @@ func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, s
}() }()
c.headerMD, err = stream.Header() c.headerMD, err = stream.Header()
if err != nil { if err != nil {
return err return
} }
p := &parser{r: stream} p := &parser{r: stream}
var inPayload *stats.InPayload
if dopts.copts.StatsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
for { for {
if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil { if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil {
if err == io.EOF { if err == io.EOF {
break break
} }
return err return
} }
} }
if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary.
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
}
c.trailerMD = stream.Trailer() c.trailerMD = stream.Trailer()
if peer, ok := peer.FromContext(stream.Context()); ok {
c.peer = peer
}
return nil return nil
} }
// sendRequest writes out various information of an RPC such as Context and Message. // sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
stream, err := t.NewStream(ctx, callHdr) stream, err := t.NewStream(ctx, callHdr)
if err != nil { if err != nil {
return nil, err return nil, err
@ -90,15 +106,27 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd
} }
} }
}() }()
var cbuf *bytes.Buffer var (
cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
if compressor != nil { if compressor != nil {
cbuf = new(bytes.Buffer) cbuf = new(bytes.Buffer)
} }
outBuf, err := encode(codec, args, compressor, cbuf) if dopts.copts.StatsHandler != nil {
outPayload = &stats.OutPayload{
Client: true,
}
}
outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
if err != nil { if err != nil {
return nil, Errorf(codes.Internal, "grpc: %v", err) return nil, Errorf(codes.Internal, "grpc: %v", err)
} }
err = t.Write(stream, outBuf, opts) err = t.Write(stream, outBuf, opts)
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
}
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status. // recvResponse to get the final status.
@ -119,8 +147,16 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
return invoke(ctx, method, args, reply, cc, opts...) return invoke(ctx, method, args, reply, cc, opts...)
} }
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok {
c.failFast = !mc.WaitForReady
if mc.Timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
defer cancel()
}
}
for _, o := range opts { for _, o := range opts {
if err := o.before(&c); err != nil { if err := o.before(&c); err != nil {
return toRPCErr(err) return toRPCErr(err)
@ -141,12 +177,32 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
defer func() { defer func() {
if err != nil { if e != nil {
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
c.traceInfo.tr.SetError() c.traceInfo.tr.SetError()
} }
}() }()
} }
sh := cc.dopts.copts.StatsHandler
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
}
defer func() {
if sh != nil {
end := &stats.End{
Client: true,
EndTime: time.Now(),
Error: e,
}
sh.HandleRPC(ctx, end)
}
}()
topts := &transport.Options{ topts := &transport.Options{
Last: true, Last: true,
Delay: false, Delay: false,
@ -168,13 +224,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if cc.dopts.cp != nil { if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type() callHdr.SendCompress = cc.dopts.cp.Type()
} }
gopts := BalancerGetOptions{ gopts := BalancerGetOptions{
BlockingWait: !c.failFast, BlockingWait: !c.failFast,
} }
t, put, err = cc.getTransport(ctx, gopts) t, put, err = cc.getTransport(ctx, gopts)
if err != nil { if err != nil {
// TODO(zhaoq): Probably revisit the error handling. // TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok { if _, ok := status.FromError(err); ok {
return err return err
} }
if err == errConnClosing || err == errConnUnavailable { if err == errConnClosing || err == errConnUnavailable {
@ -189,7 +246,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if c.traceInfo.tr != nil { if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
} }
stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts)
if err != nil { if err != nil {
if put != nil { if put != nil {
put() put()
@ -206,7 +263,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
} }
return toRPCErr(err) return toRPCErr(err)
} }
err = recvResponse(cc.dopts, t, &c, stream, reply) err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
if err != nil { if err != nil {
if put != nil { if put != nil {
put() put()
@ -228,6 +285,6 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
put() put()
put = nil put = nil
} }
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) return stream.Status().Err()
} }
} }

View file

@ -36,8 +36,8 @@ package grpc
import ( import (
"errors" "errors"
"fmt" "fmt"
"math"
"net" "net"
"strings"
"sync" "sync"
"time" "time"
@ -45,6 +45,8 @@ import (
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
@ -54,6 +56,8 @@ var (
ErrClientConnClosing = errors.New("grpc: the client connection is closing") ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the // ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout. // underlying connections within the specified timeout.
// DEPRECATED: Please use context.DeadlineExceeded instead. This error will be
// removed in Q1 2017.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing") ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errNoTransportSecurity indicates that there is no transport security // errNoTransportSecurity indicates that there is no transport security
@ -75,7 +79,6 @@ var (
errConnClosing = errors.New("grpc: the connection is closing") errConnClosing = errors.New("grpc: the connection is closing")
// errConnUnavailable indicates that the connection is unavailable. // errConnUnavailable indicates that the connection is unavailable.
errConnUnavailable = errors.New("grpc: the connection is unavailable") errConnUnavailable = errors.New("grpc: the connection is unavailable")
errNoAddr = errors.New("grpc: there is no address available to dial")
// minimum time to give a connection to complete // minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second minConnectTimeout = 20 * time.Second
) )
@ -83,22 +86,33 @@ var (
// dialOptions configure a Dial call. dialOptions are set by the DialOption // dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial. // values passed to Dial.
type dialOptions struct { type dialOptions struct {
unaryInt UnaryClientInterceptor unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor streamInt StreamClientInterceptor
codec Codec codec Codec
cp Compressor cp Compressor
dc Decompressor dc Decompressor
bs backoffStrategy bs backoffStrategy
balancer Balancer balancer Balancer
block bool block bool
insecure bool insecure bool
timeout time.Duration timeout time.Duration
copts transport.ConnectOptions scChan <-chan ServiceConfig
copts transport.ConnectOptions
maxMsgSize int
} }
const defaultClientMaxMsgSize = math.MaxInt32
// DialOption configures how we set up the connection. // DialOption configures how we set up the connection.
type DialOption func(*dialOptions) type DialOption func(*dialOptions)
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive.
func WithMaxMsgSize(s int) DialOption {
return func(o *dialOptions) {
o.maxMsgSize = s
}
}
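
For context, a minimal sketch of dialing with this new option; the address and the 16 MB limit are illustrative assumptions, not values used by dex:

package main

import (
    "log"

    "google.golang.org/grpc"
)

func main() {
    // Raise the client-side receive limit; address and size are placeholders.
    conn, err := grpc.Dial("127.0.0.1:5557",
        grpc.WithInsecure(),
        grpc.WithMaxMsgSize(16*1024*1024))
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}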
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. // WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
func WithCodec(c Codec) DialOption { func WithCodec(c Codec) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
@ -129,6 +143,13 @@ func WithBalancer(b Balancer) DialOption {
} }
} }
// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
return func(o *dialOptions) {
o.scChan = c
}
}
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay // WithBackoffMaxDelay configures the dialer to use the provided maximum delay
// when backing off after failed connection attempts. // when backing off after failed connection attempts.
func WithBackoffMaxDelay(md time.Duration) DialOption { func WithBackoffMaxDelay(md time.Duration) DialOption {
@ -199,6 +220,8 @@ func WithTimeout(d time.Duration) DialOption {
} }
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. // WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
// Temporary() method to decide if it should try to reconnect to the network address.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
@ -210,6 +233,25 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
} }
} }
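
As a hedged example of the dialer hook, a client could reach a server over a unix socket; the socket path here is hypothetical:

package main

import (
    "log"
    "net"
    "time"

    "google.golang.org/grpc"
)

func main() {
    // The dialer receives the target string and the remaining dial timeout.
    conn, err := grpc.Dial("/var/run/dex.sock",
        grpc.WithInsecure(),
        grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
            return net.DialTimeout("unix", addr, timeout)
        }))
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}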
// WithStatsHandler returns a DialOption that specifies the stats handler
// for all the RPCs and underlying network connections in this ClientConn.
func WithStatsHandler(h stats.Handler) DialOption {
return func(o *dialOptions) {
o.copts.StatsHandler = h
}
}
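
A hedged sketch of wiring in a handler; the TagConn/HandleConn methods are assumed here to complete the stats.Handler interface, and the logging is purely illustrative:

package main

import (
    "log"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/stats"
)

// logHandler is a hypothetical stats.Handler that only logs RPC lifecycle events.
type logHandler struct{}

func (logHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    return ctx
}

func (logHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
    log.Printf("rpc event: %T", s)
}

func (logHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
    return ctx
}

func (logHandler) HandleConn(ctx context.Context, s stats.ConnStats) {}

func main() {
    conn, err := grpc.Dial("127.0.0.1:5557",
        grpc.WithInsecure(),
        grpc.WithStatsHandler(logHandler{}))
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}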
// FailOnNonTempDialError returns a DialOption that specifies whether gRPC fails on non-temporary dial errors.
// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
// address and won't try to reconnect.
// The default value of FailOnNonTempDialError is false.
// This is an EXPERIMENTAL API.
func FailOnNonTempDialError(f bool) DialOption {
return func(o *dialOptions) {
o.copts.FailOnNonTempDialError = f
}
}
// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. // WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
func WithUserAgent(s string) DialOption { func WithUserAgent(s string) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
@ -217,6 +259,13 @@ func WithUserAgent(s string) DialOption {
} }
} }
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
return func(o *dialOptions) {
o.copts.KeepaliveParams = kp
}
}
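
A minimal sketch of client keepalive; the values below are illustrative assumptions, not recommendations, and must be compatible with the server's EnforcementPolicy or the server may close the connection:

package main

import (
    "log"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/keepalive"
)

func main() {
    kp := keepalive.ClientParameters{
        Time:                30 * time.Second, // ping after 30s of inactivity
        Timeout:             10 * time.Second, // wait 10s for the ping ack
        PermitWithoutStream: true,             // ping even with no active RPCs
    }
    conn, err := grpc.Dial("127.0.0.1:5557",
        grpc.WithInsecure(),
        grpc.WithKeepaliveParams(kp))
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}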
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. // WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
@ -231,6 +280,15 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
} }
} }
// WithAuthority returns a DialOption that specifies the value to be used as
// the :authority pseudo-header. This value only works with WithInsecure and
// has no effect if TransportCredentials are present.
func WithAuthority(a string) DialOption {
return func(o *dialOptions) {
o.copts.Authority = a
}
}
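
A small sketch of the new authority override, useful when the dialed address (here a hypothetical proxy IP) differs from the logical service name:

package example

import (
    "google.golang.org/grpc"
)

func dialViaProxy() (*grpc.ClientConn, error) {
    // Only effective together with WithInsecure; ignored when
    // TransportCredentials are set. Address and name are placeholders.
    return grpc.Dial("10.0.0.5:5557",
        grpc.WithInsecure(),
        grpc.WithAuthority("dex.example.com"))
}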
// Dial creates a client connection to the given target. // Dial creates a client connection to the given target.
func Dial(target string, opts ...DialOption) (*ClientConn, error) { func Dial(target string, opts ...DialOption) (*ClientConn, error) {
return DialContext(context.Background(), target, opts...) return DialContext(context.Background(), target, opts...)
@ -247,6 +305,25 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
conns: make(map[Address]*addrConn), conns: make(map[Address]*addrConn),
} }
cc.ctx, cc.cancel = context.WithCancel(context.Background()) cc.ctx, cc.cancel = context.WithCancel(context.Background())
cc.dopts.maxMsgSize = defaultClientMaxMsgSize
for _, opt := range opts {
opt(&cc.dopts)
}
cc.mkp = cc.dopts.copts.KeepaliveParams
grpcUA := "grpc-go/" + Version
if cc.dopts.copts.UserAgent != "" {
cc.dopts.copts.UserAgent += " " + grpcUA
} else {
cc.dopts.copts.UserAgent = grpcUA
}
if cc.dopts.timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
defer cancel()
}
defer func() { defer func() {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -259,10 +336,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
} }
}() }()
for _, opt := range opts { if cc.dopts.scChan != nil {
opt(&cc.dopts) // Wait for the initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
}
case <-ctx.Done():
return nil, ctx.Err()
}
} }
// Set defaults. // Set defaults.
if cc.dopts.codec == nil { if cc.dopts.codec == nil {
cc.dopts.codec = protoCodec{} cc.dopts.codec = protoCodec{}
@ -273,21 +357,18 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
creds := cc.dopts.copts.TransportCredentials creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" { if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName cc.authority = creds.Info().ServerName
} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
cc.authority = cc.dopts.copts.Authority
} else { } else {
colonPos := strings.LastIndex(target, ":") cc.authority = target
if colonPos == -1 {
colonPos = len(target)
}
cc.authority = target[:colonPos]
} }
var ok bool
waitC := make(chan error, 1) waitC := make(chan error, 1)
go func() { go func() {
var addrs []Address defer close(waitC)
if cc.dopts.balancer == nil { if cc.dopts.balancer == nil && cc.sc.LB != nil {
// Connect to target directly if balancer is nil. cc.dopts.balancer = cc.sc.LB
addrs = append(addrs, Address{Addr: target}) }
} else { if cc.dopts.balancer != nil {
var credsClone credentials.TransportCredentials var credsClone credentials.TransportCredentials
if creds != nil { if creds != nil {
credsClone = creds.Clone() credsClone = creds.Clone()
@ -300,29 +381,23 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
return return
} }
ch := cc.dopts.balancer.Notify() ch := cc.dopts.balancer.Notify()
if ch == nil { if ch != nil {
// There is no name resolver installed. if cc.dopts.block {
addrs = append(addrs, Address{Addr: target}) doneChan := make(chan struct{})
} else { go cc.lbWatcher(doneChan)
addrs, ok = <-ch <-doneChan
if !ok || len(addrs) == 0 { } else {
waitC <- errNoAddr go cc.lbWatcher(nil)
return
} }
}
}
for _, a := range addrs {
if err := cc.resetAddrConn(a, false, nil); err != nil {
waitC <- err
return return
} }
} }
close(waitC) // No balancer, or no resolver within the balancer. Connect directly.
if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil {
waitC <- err
return
}
}() }()
var timeoutCh <-chan time.Time
if cc.dopts.timeout > 0 {
timeoutCh = time.After(cc.dopts.timeout)
}
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, ctx.Err() return nil, ctx.Err()
@ -330,14 +405,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if err != nil { if err != nil {
return nil, err return nil, err
} }
case <-timeoutCh:
return nil, ErrClientConnTimeout
} }
// If balancer is nil or balancer.Notify() is nil, ok will be false here.
// The lbWatcher goroutine will not be created. if cc.dopts.scChan != nil {
if ok { go cc.scWatcher()
go cc.lbWatcher()
} }
return cc, nil return cc, nil
} }
@ -384,10 +457,16 @@ type ClientConn struct {
dopts dialOptions dopts dialOptions
mu sync.RWMutex mu sync.RWMutex
sc ServiceConfig
conns map[Address]*addrConn conns map[Address]*addrConn
// Keepalive parameters can be updated if a GoAway is received.
mkp keepalive.ClientParameters
} }
func (cc *ClientConn) lbWatcher() { // lbWatcher watches the Notify channel of the balancer in cc and manages
// connections accordingly. If doneChan is not nil, it is closed after the
// first successful connection is made.
func (cc *ClientConn) lbWatcher(doneChan chan struct{}) {
for addrs := range cc.dopts.balancer.Notify() { for addrs := range cc.dopts.balancer.Notify() {
var ( var (
add []Address // Addresses need to setup connections. add []Address // Addresses need to setup connections.
@ -414,7 +493,15 @@ func (cc *ClientConn) lbWatcher() {
} }
cc.mu.Unlock() cc.mu.Unlock()
for _, a := range add { for _, a := range add {
cc.resetAddrConn(a, true, nil) if doneChan != nil {
err := cc.resetAddrConn(a, true, nil)
if err == nil {
close(doneChan)
doneChan = nil
}
} else {
cc.resetAddrConn(a, false, nil)
}
} }
for _, c := range del { for _, c := range del {
c.tearDown(errConnDrain) c.tearDown(errConnDrain)
@ -422,15 +509,36 @@ func (cc *ClientConn) lbWatcher() {
} }
} }
func (cc *ClientConn) scWatcher() {
for {
select {
case sc, ok := <-cc.dopts.scChan:
if !ok {
return
}
cc.mu.Lock()
// TODO: load balance policy runtime change is ignored.
// We may revisit this decision in the future.
cc.sc = sc
cc.mu.Unlock()
case <-cc.ctx.Done():
return
}
}
}
// resetAddrConn creates an addrConn for addr and adds it to cc.conns. // resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. // If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead. // If tearDownErr is nil, errConnDrain will be used instead.
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error {
ac := &addrConn{ ac := &addrConn{
cc: cc, cc: cc,
addr: addr, addr: addr,
dopts: cc.dopts, dopts: cc.dopts,
} }
cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = cc.mkp
cc.mu.RUnlock()
ac.ctx, ac.cancel = context.WithCancel(cc.ctx) ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
ac.stateCV = sync.NewCond(&ac.mu) ac.stateCV = sync.NewCond(&ac.mu)
if EnableTracing { if EnableTracing {
@ -475,8 +583,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
stale.tearDown(tearDownErr) stale.tearDown(tearDownErr)
} }
} }
// skipWait may overwrite the decision in ac.dopts.block. if block {
if ac.dopts.block && !skipWait {
if err := ac.resetTransport(false); err != nil { if err := ac.resetTransport(false); err != nil {
if err != errConnClosing { if err != errConnClosing {
// Tear down ac and delete it from cc.conns. // Tear down ac and delete it from cc.conns.
@ -509,6 +616,14 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
return nil return nil
} }
// TODO: Avoid the locking here.
func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) {
cc.mu.RLock()
defer cc.mu.RUnlock()
m, ok = cc.sc.Methods[method]
return
}
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
var ( var (
ac *addrConn ac *addrConn
@ -605,6 +720,20 @@ type addrConn struct {
tearDownErr error tearDownErr error
} }
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
switch r {
case transport.TooManyPings:
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
if v > ac.cc.mkp.Time {
ac.cc.mkp.Time = v
}
ac.cc.mu.Unlock()
}
}
// printf records an event in ac's event log, unless ac has been closed. // printf records an event in ac's event log, unless ac has been closed.
// REQUIRES ac.mu is held. // REQUIRES ac.mu is held.
func (ac *addrConn) printf(format string, a ...interface{}) { func (ac *addrConn) printf(format string, a ...interface{}) {
@ -684,7 +813,13 @@ func (ac *addrConn) resetTransport(closeTransport bool) error {
} }
ctx, cancel := context.WithTimeout(ac.ctx, timeout) ctx, cancel := context.WithTimeout(ac.ctx, timeout)
connectTime := time.Now() connectTime := time.Now()
newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts) sinfo := transport.TargetInfo{
Addr: ac.addr.Addr,
Metadata: ac.addr.Metadata,
}
newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts)
// Don't call cancel in success path due to a race in Go 1.6:
// https://github.com/golang/go/issues/15078.
if err != nil { if err != nil {
cancel() cancel()
@ -755,6 +890,7 @@ func (ac *addrConn) transportMonitor() {
} }
return return
case <-t.GoAway(): case <-t.GoAway():
ac.adjustParams(t.GetGoAwayReason())
// If GoAway happens without any network I/O error, ac is closed without shutting down the // If GoAway happens without any network I/O error, ac is closed without shutting down the
// underlying transport (the transport will be closed when all the pending RPCs finished or // underlying transport (the transport will be closed when all the pending RPCs finished or
// failed.). // failed.).
@ -763,9 +899,9 @@ func (ac *addrConn) transportMonitor() {
// In both cases, a new ac is created. // In both cases, a new ac is created.
select { select {
case <-t.Error(): case <-t.Error():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
default: default:
ac.cc.resetAddrConn(ac.addr, true, errConnDrain) ac.cc.resetAddrConn(ac.addr, false, errConnDrain)
} }
return return
case <-t.Error(): case <-t.Error():
@ -774,7 +910,8 @@ func (ac *addrConn) transportMonitor() {
t.Close() t.Close()
return return
case <-t.GoAway(): case <-t.GoAway():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) ac.adjustParams(t.GetGoAwayReason())
ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
return return
default: default:
} }
@ -803,7 +940,7 @@ func (ac *addrConn) transportMonitor() {
} }
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or // wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
// iv) transport is in TransientFailure and there's no balancer/failfast is true. // iv) transport is in TransientFailure and there is a balancer/failfast is true.
func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
for { for {
ac.mu.Lock() ac.mu.Lock()

118
vendor/google.golang.org/grpc/codec.go generated vendored Normal file
View file

@ -0,0 +1,118 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"math"
"sync"
"github.com/golang/protobuf/proto"
)
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
// a Codec's methods can be called from concurrent goroutines.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct {
}
type cachedProtoBuffer struct {
lastMarshaledSize uint32
proto.Buffer
}
func capToMaxInt32(val int) uint32 {
if val > math.MaxInt32 {
return uint32(math.MaxInt32)
}
return uint32(val)
}
func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
protoMsg := v.(proto.Message)
newSlice := make([]byte, 0, cb.lastMarshaledSize)
cb.SetBuf(newSlice)
cb.Reset()
if err := cb.Marshal(protoMsg); err != nil {
return nil, err
}
out := cb.Bytes()
cb.lastMarshaledSize = capToMaxInt32(len(out))
return out, nil
}
func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
out, err := p.marshal(v, cb)
// put back buffer and lose the ref to the slice
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return out, err
}
func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
cb.SetBuf(data)
err := cb.Unmarshal(v.(proto.Message))
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return err
}
func (protoCodec) String() string {
return "proto"
}
var (
protoBufferPool = &sync.Pool{
New: func() interface{} {
return &cachedProtoBuffer{
Buffer: proto.Buffer{},
lastMarshaledSize: 16,
}
},
}
)
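
The Codec interface extracted into this new file can be satisfied by user types as well. Below is a hypothetical gob-based codec, shown only to illustrate the contract; dex keeps the default proto codec. Building a fresh encoder per call keeps the type goroutine-safe, as the interface comment requires:

package example

import (
    "bytes"
    "encoding/gob"
)

// gobCodec is an illustrative grpc Codec backed by encoding/gob.
type gobCodec struct{}

func (gobCodec) Marshal(v interface{}) ([]byte, error) {
    var buf bytes.Buffer
    if err := gob.NewEncoder(&buf).Encode(v); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}

func (gobCodec) Unmarshal(data []byte, v interface{}) error {
    return gob.NewDecoder(bytes.NewReader(data)).Decode(v)
}

// String names the codec; it becomes part of the content-type on the wire.
func (gobCodec) String() string { return "gob" }

It would be plugged in with grpc.WithCodec(gobCodec{}) on the client and grpc.CustomCodec(gobCodec{}) on the server, both of which appear elsewhere in this diff.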

View file

@ -102,6 +102,10 @@ type TransportCredentials interface {
// authentication protocol on rawConn for clients. It returns the authenticated // authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection. // connection and the corresponding auth information about the connection.
// Implementations must use the provided context to implement timely cancellation. // Implementations must use the provided context to implement timely cancellation.
// gRPC will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns // ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about // the authenticated connection and the corresponding auth information about
@ -165,9 +169,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net
case <-ctx.Done(): case <-ctx.Done():
return nil, nil, ctx.Err() return nil, nil, ctx.Err()
} }
// TODO(zhaoq): Omit the auth info for client now. It is more for return conn, TLSInfo{conn.ConnectionState()}, nil
// information than anything else.
return conn, nil, nil
} }
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {

View file

@ -1,4 +1,5 @@
// +build go1.7 // +build go1.7
// +build !go1.8
/* /*
* *
@ -44,8 +45,6 @@ import (
// contains a mutex and must not be copied. // contains a mutex and must not be copied.
// //
// If cfg is nil, a new zero tls.Config is returned. // If cfg is nil, a new zero tls.Config is returned.
//
// TODO replace this function with official clone function.
func cloneTLSConfig(cfg *tls.Config) *tls.Config { func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil { if cfg == nil {
return &tls.Config{} return &tls.Config{}

View file

@ -1,7 +1,8 @@
// +build !go1.6 // +build go1.8
/* /*
* Copyright 2016, Google Inc. *
* Copyright 2017, Google Inc.
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -32,20 +33,21 @@
* *
*/ */
package transport package credentials
import ( import (
"net" "crypto/tls"
"time"
"golang.org/x/net/context"
) )
// dialContext connects to the address on the named network. // cloneTLSConfig returns a shallow clone of the exported
func dialContext(ctx context.Context, network, address string) (net.Conn, error) { // fields of cfg, ignoring the unexported sync.Once, which
var dialer net.Dialer // contains a mutex and must not be copied.
if deadline, ok := ctx.Deadline(); ok { //
dialer.Timeout = deadline.Sub(time.Now()) // If cfg is nil, a new zero tls.Config is returned.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
} }
return dialer.Dial(network, address)
return cfg.Clone()
} }

View file

@ -44,8 +44,6 @@ import (
// contains a mutex and must not be copied. // contains a mutex and must not be copied.
// //
// If cfg is nil, a new zero tls.Config is returned. // If cfg is nil, a new zero tls.Config is returned.
//
// TODO replace this function with official clone function.
func cloneTLSConfig(cfg *tls.Config) *tls.Config { func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil { if cfg == nil {
return &tls.Config{} return &tls.Config{}

View file

@ -40,7 +40,7 @@ import (
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. inovker is the handler to complete the RPC // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it. // and it is the responsibility of the interceptor to call it.
// This is the EXPERIMENTAL API. // This is the EXPERIMENTAL API.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error

80
vendor/google.golang.org/grpc/keepalive/keepalive.go generated vendored Normal file
View file

@ -0,0 +1,80 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package keepalive defines configurable parameters for point-to-point healthcheck.
package keepalive
import (
"time"
)
// ClientParameters is used to set keepalive parameters on the client-side.
// These configure how the client will actively probe to notice when a connection is broken
// and to cause activity so intermediaries are aware the connection is still in use.
// Make sure these parameters are set in coordination with the keepalive policy on the server,
// as incompatible settings can result in the connection being closed.
type ClientParameters struct {
// After a duration of this time, if the client doesn't see any activity, it pings the server to check whether the transport is still alive.
Time time.Duration // The current default value is infinity.
// After having pinged for a keepalive check, the client waits for a duration of Timeout; if no activity is seen even after that,
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
// If true, client runs keepalive checks even with no active RPCs.
PermitWithoutStream bool // false by default.
}
// ServerParameters is used to set keepalive and max-age parameters on the server-side.
type ServerParameters struct {
// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
// Idleness duration is measured from the most recent time the number of outstanding RPCs became zero, or from connection establishment.
MaxConnectionIdle time.Duration // The current default value is infinity.
// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
MaxConnectionAge time.Duration // The current default value is infinity.
// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
MaxConnectionAgeGrace time.Duration // The current default value is infinity.
// After a duration of this time, if the server doesn't see any activity, it pings the client to check whether the transport is still alive.
Time time.Duration // The current default value is 2 hours.
// After having pinged for a keepalive check, the server waits for a duration of Timeout; if no activity is seen even after that,
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
}
// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
// Server will close connection with a client that violates this policy.
type EnforcementPolicy struct {
// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
MinTime time.Duration // The current default value is 5 minutes.
// If true, the server expects keepalive pings even when there are no active streams (RPCs).
PermitWithoutStream bool // false by default.
}

View file

@ -32,6 +32,7 @@
*/ */
// Package metadata defines the structure of the metadata supported by the gRPC library. // Package metadata defines the structure of the metadata supported by the gRPC library.
// Please refer to http://www.grpc.io/docs/guides/wire.html for more information about custom-metadata.
package metadata // import "google.golang.org/grpc/metadata" package metadata // import "google.golang.org/grpc/metadata"
import ( import (
@ -82,6 +83,7 @@ func DecodeKeyValue(k, v string) (string, string, error) {
type MD map[string][]string type MD map[string][]string
// New creates a MD from given key-value map. // New creates a MD from given key-value map.
// Keys are automatically converted to lowercase. And for keys having "-bin" as a suffix, their values will be Base64-encoded.
func New(m map[string]string) MD { func New(m map[string]string) MD {
md := MD{} md := MD{}
for k, v := range m { for k, v := range m {
@ -93,6 +95,7 @@ func New(m map[string]string) MD {
// Pairs returns an MD formed by the mapping of key, value ... // Pairs returns an MD formed by the mapping of key, value ...
// Pairs panics if len(kv) is odd. // Pairs panics if len(kv) is odd.
// Keys are automatically converted to lowercase. And for keys having "-bin" as a suffix, their values will be Base64-encoded.
func Pairs(kv ...string) MD { func Pairs(kv ...string) MD {
if len(kv)%2 == 1 { if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
@ -133,15 +136,41 @@ func Join(mds ...MD) MD {
return out return out
} }
type mdKey struct{} type mdIncomingKey struct{}
type mdOutgoingKey struct{}
// NewContext creates a new context with md attached. // NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated.
func NewContext(ctx context.Context, md MD) context.Context { func NewContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdKey{}, md) return NewOutgoingContext(ctx, md)
} }
// FromContext returns the MD in ctx if it exists. // NewIncomingContext creates a new context with incoming md attached.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, md)
}
// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated.
func FromContext(ctx context.Context) (md MD, ok bool) { func FromContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdKey{}).(MD) return FromIncomingContext(ctx)
}
// FromIncomingContext returns the incoming MD in ctx if it exists. The
// returned md should be treated as immutable; writing to it may cause races.
// Modifications should be made to copies of the returned md.
func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdIncomingKey{}).(MD)
return
}
// FromOutgoingContext returns the outgoing MD in ctx if it exists. The
// returned md should be treated as immutable; writing to it may cause races.
// Modifications should be made to copies of the returned md.
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdOutgoingKey{}).(MD)
return return
} }
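
A short sketch of the new split; outgoing and incoming metadata live under different context keys, so a client's outgoing MD is not visible through FromIncomingContext. The key name below is a placeholder:

package main

import (
    "log"

    "golang.org/x/net/context"
    "google.golang.org/grpc/metadata"
)

func main() {
    // Client side: attach metadata that will be sent with RPCs on this ctx.
    ctx := metadata.NewOutgoingContext(context.Background(),
        metadata.Pairs("request-id", "abc123"))

    // The incoming key is separate; on the client it is unset, since only the
    // server transport populates incoming metadata.
    if _, ok := metadata.FromIncomingContext(ctx); !ok {
        log.Println("no incoming metadata on the client, as expected")
    }

    if md, ok := metadata.FromOutgoingContext(ctx); ok {
        log.Println("outgoing:", md["request-id"])
    }
}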

View file

@ -37,45 +37,21 @@ import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"encoding/binary" "encoding/binary"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"os" "os"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
// Codec defines the interface gRPC uses to encode and decode messages.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct{}
func (protoCodec) Marshal(v interface{}) ([]byte, error) {
return proto.Marshal(v.(proto.Message))
}
func (protoCodec) Unmarshal(data []byte, v interface{}) error {
return proto.Unmarshal(data, v.(proto.Message))
}
func (protoCodec) String() string {
return "proto"
}
// Compressor defines the interface gRPC uses to compress a message. // Compressor defines the interface gRPC uses to compress a message.
type Compressor interface { type Compressor interface {
// Do compresses p into w. // Do compresses p into w.
@ -138,6 +114,7 @@ type callInfo struct {
failFast bool failFast bool
headerMD metadata.MD headerMD metadata.MD
trailerMD metadata.MD trailerMD metadata.MD
peer *peer.Peer
traceInfo traceInfo // in trace.go traceInfo traceInfo // in trace.go
} }
@ -181,12 +158,22 @@ func Trailer(md *metadata.MD) CallOption {
}) })
} }
// Peer returns a CallOption that retrieves peer information for a
// unary RPC.
func Peer(peer *peer.Peer) CallOption {
return afterCall(func(c *callInfo) {
if c.peer != nil {
*peer = *c.peer
}
})
}
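
Combined with the credentials change above that now returns TLSInfo from ClientHandshake, the new Peer option lets a client inspect the negotiated TLS state. A hedged sketch; the method name and the use of empty.Empty are stand-ins for a real generated call:

package example

import (
    "log"

    "github.com/golang/protobuf/ptypes/empty"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/peer"
)

func peerInfo(ctx context.Context, cc *grpc.ClientConn) {
    var p peer.Peer
    var in, out empty.Empty
    // "/some.Service/Method" is a placeholder full method name.
    if err := grpc.Invoke(ctx, "/some.Service/Method", &in, &out, cc, grpc.Peer(&p)); err != nil {
        log.Printf("rpc: %v", err)
    }
    if tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo); ok {
        log.Printf("cipher suite: 0x%x", tlsInfo.State.CipherSuite)
    }
}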
// FailFast configures the action to take when an RPC is attempted on broken // FailFast configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If failfast is true, the RPC will fail // connections or unreachable servers. If failfast is true, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a // immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will retry // connection is available (or the call is canceled or times out) and will retry
// the call if it fails due to a transient error. Please refer to // the call if it fails due to a transient error. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md // https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast defaults to true.
func FailFast(failFast bool) CallOption { func FailFast(failFast bool) CallOption {
return beforeCall(func(c *callInfo) error { return beforeCall(func(c *callInfo) error {
c.failFast = failFast c.failFast = failFast
@ -255,9 +242,11 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro
// encode serializes msg and prepends the message header. If msg is nil, it // encode serializes msg and prepends the message header. If msg is nil, it
// generates the message header of 0 message length. // generates the message header of 0 message length.
func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) {
var b []byte var (
var length uint b []byte
length uint
)
if msg != nil { if msg != nil {
var err error var err error
// TODO(zhaoq): optimize to reduce memory alloc and copying. // TODO(zhaoq): optimize to reduce memory alloc and copying.
@ -265,6 +254,12 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte
if err != nil { if err != nil {
return nil, err return nil, err
} }
if outPayload != nil {
outPayload.Payload = msg
// TODO truncate large payload.
outPayload.Data = b
outPayload.Length = len(b)
}
if cp != nil { if cp != nil {
if err := cp.Do(cbuf, b); err != nil { if err := cp.Do(cbuf, b); err != nil {
return nil, err return nil, err
@ -295,6 +290,10 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte
// Copy encoded msg to buf // Copy encoded msg to buf
copy(buf[5:], b) copy(buf[5:], b)
if outPayload != nil {
outPayload.WireLength = len(buf)
}
return buf, nil return buf, nil
} }
@ -311,11 +310,14 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er
return nil return nil
} }
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error { func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error {
pf, d, err := p.recvMsg(maxMsgSize) pf, d, err := p.recvMsg(maxMsgSize)
if err != nil { if err != nil {
return err return err
} }
if inPayload != nil {
inPayload.WireLength = len(d)
}
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
return err return err
} }
@ -333,91 +335,67 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
if err := c.Unmarshal(d, m); err != nil { if err := c.Unmarshal(d, m); err != nil {
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
} }
if inPayload != nil {
inPayload.RecvTime = time.Now()
inPayload.Payload = m
// TODO truncate large payload.
inPayload.Data = d
inPayload.Length = len(d)
}
return nil return nil
} }
// rpcError defines the status from an RPC.
type rpcError struct {
code codes.Code
desc string
}
func (e *rpcError) Error() string {
return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
}
// Code returns the error code for err if it was produced by the rpc system. // Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown. // Otherwise, it returns codes.Unknown.
//
// Deprecated; use status.FromError and Code method instead.
func Code(err error) codes.Code { func Code(err error) codes.Code {
if err == nil { if s, ok := status.FromError(err); ok {
return codes.OK return s.Code()
}
if e, ok := err.(*rpcError); ok {
return e.code
} }
return codes.Unknown return codes.Unknown
} }
// ErrorDesc returns the error description of err if it was produced by the rpc system. // ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil. // Otherwise, it returns err.Error() or empty string when err is nil.
//
// Deprecated; use status.FromError and Message method instead.
func ErrorDesc(err error) string { func ErrorDesc(err error) string {
if err == nil { if s, ok := status.FromError(err); ok {
return "" return s.Message()
}
if e, ok := err.(*rpcError); ok {
return e.desc
} }
return err.Error() return err.Error()
} }
// Errorf returns an error containing an error code and a description; // Errorf returns an error containing an error code and a description;
// Errorf returns nil if c is OK. // Errorf returns nil if c is OK.
//
// Deprecated; use status.Errorf instead.
func Errorf(c codes.Code, format string, a ...interface{}) error { func Errorf(c codes.Code, format string, a ...interface{}) error {
if c == codes.OK { return status.Errorf(c, format, a...)
return nil
}
return &rpcError{
code: c,
desc: fmt.Sprintf(format, a...),
}
} }
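
Since Code, ErrorDesc, and Errorf now delegate to the status package, new code can use it directly. A minimal sketch; the code and message here are arbitrary:

package main

import (
    "log"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    err := status.Errorf(codes.NotFound, "client %q not found", "example-app")
    // Preferred over the deprecated grpc.Code/grpc.ErrorDesc helpers.
    if s, ok := status.FromError(err); ok {
        log.Printf("code=%v msg=%q", s.Code(), s.Message())
    }
}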
// toRPCErr converts an error into a rpcError. // toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error { func toRPCErr(err error) error {
switch e := err.(type) { if _, ok := status.FromError(err); ok {
case *rpcError:
return err return err
}
switch e := err.(type) {
case transport.StreamError: case transport.StreamError:
return &rpcError{ return status.Error(e.Code, e.Desc)
code: e.Code,
desc: e.Desc,
}
case transport.ConnectionError: case transport.ConnectionError:
return &rpcError{ return status.Error(codes.Internal, e.Desc)
code: codes.Internal,
desc: e.Desc,
}
default: default:
switch err { switch err {
case context.DeadlineExceeded: case context.DeadlineExceeded:
return &rpcError{ return status.Error(codes.DeadlineExceeded, err.Error())
code: codes.DeadlineExceeded,
desc: err.Error(),
}
case context.Canceled: case context.Canceled:
return &rpcError{ return status.Error(codes.Canceled, err.Error())
code: codes.Canceled,
desc: err.Error(),
}
case ErrClientConnClosing: case ErrClientConnClosing:
return &rpcError{ return status.Error(codes.FailedPrecondition, err.Error())
code: codes.FailedPrecondition,
desc: err.Error(),
}
} }
} }
return Errorf(codes.Unknown, "%v", err) return status.Error(codes.Unknown, err.Error())
} }
// convertCode converts a standard Go error into its canonical code. Note that // convertCode converts a standard Go error into its canonical code. Note that
@ -448,10 +426,51 @@ func convertCode(err error) codes.Code {
return codes.Unknown return codes.Unknown
} }
// SupportPackageIsVersion3 is referenced from generated protocol buffer files // MethodConfig defines the configuration recommended by the service providers for a
// particular method.
// This is EXPERIMENTAL and subject to change.
type MethodConfig struct {
// WaitForReady indicates whether RPCs sent to this method should wait until
// the connection is ready by default (!failfast). The value specified via the
// gRPC client API will override the value set here.
WaitForReady bool
// Timeout is the default timeout for RPCs sent to this method. The actual
// deadline used will be the minimum of the value specified here and the value
// set by the application via the gRPC client API. If either one is not set,
// then the other will be used. If neither is set, then the RPC has no deadline.
Timeout time.Duration
// MaxReqSize is the maximum allowed payload size for an individual request in a
// stream (client->server) in bytes. The size which is measured is the serialized
// payload after per-message compression (but before stream compression) in bytes.
// The actual value used is the minimum of the value specified here and the value set
// by the application via the gRPC client API. If either one is not set, then the other
// will be used. If neither is set, then the built-in default is used.
// TODO: support this.
MaxReqSize uint32
// MaxRespSize is the maximum allowed payload size for an individual response in a
// stream (server->client) in bytes.
// TODO: support this.
MaxRespSize uint32
}
// ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave.
// This is EXPERIMENTAL and subject to change.
type ServiceConfig struct {
// LB is the load balancer the service provider recommends. The balancer specified
// via grpc.WithBalancer will override this.
LB Balancer
// Methods contains a map for the methods in this service.
Methods map[string]MethodConfig
}
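
Tying this to the WithServiceConfig option added in clientconn.go, a client could seed an initial config through the channel before dialing. A sketch under the assumption that the method key is the full /service/method name; the name and values below are made up:

package main

import (
    "log"
    "time"

    "google.golang.org/grpc"
)

func main() {
    scChan := make(chan grpc.ServiceConfig, 1)
    scChan <- grpc.ServiceConfig{
        Methods: map[string]grpc.MethodConfig{
            "/some.Service/Method": {
                WaitForReady: true,            // default to !failfast
                Timeout:      2 * time.Second, // default per-RPC deadline
            },
        },
    }
    // DialContext blocks until the initial config is read from the channel.
    conn, err := grpc.Dial("127.0.0.1:5557",
        grpc.WithInsecure(),
        grpc.WithServiceConfig(scChan))
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}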
// SupportPackageIsVersion4 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the grpc package. // to assert that that code is compatible with this version of the grpc package.
// //
// This constant may be renamed in the future if a change in the generated code // This constant may be renamed in the future if a change in the generated code
// requires a synchronised update of grpc-go and protoc-gen-go. This constant // requires a synchronised update of grpc-go and protoc-gen-go. This constant
// should not be referenced from any other code. // should not be referenced from any other code.
const SupportPackageIsVersion3 = true const SupportPackageIsVersion4 = true
// Version is the current grpc version.
const Version = "1.3.0-dev"

View file

@ -53,7 +53,11 @@ import (
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal" "google.golang.org/grpc/internal"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
@ -89,10 +93,12 @@ type service struct {
type Server struct { type Server struct {
opts options opts options
mu sync.Mutex // guards following mu sync.Mutex // guards following
lis map[net.Listener]bool lis map[net.Listener]bool
conns map[io.Closer]bool conns map[io.Closer]bool
drain bool drain bool
ctx context.Context
cancel context.CancelFunc
// A CondVar to let GracefulStop() blocks until all the pending RPCs are finished // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished
// and all the transport goes away. // and all the transport goes away.
cv *sync.Cond cv *sync.Cond
@ -108,8 +114,13 @@ type options struct {
maxMsgSize int maxMsgSize int
unaryInt UnaryServerInterceptor unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor streamInt StreamServerInterceptor
inTapHandle tap.ServerInHandle
statsHandler stats.Handler
maxConcurrentStreams uint32 maxConcurrentStreams uint32
useHandlerImpl bool // use http.Handler-based server useHandlerImpl bool // use http.Handler-based server
unknownStreamDesc *StreamDesc
keepaliveParams keepalive.ServerParameters
keepalivePolicy keepalive.EnforcementPolicy
} }
var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit
@ -117,6 +128,20 @@ var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size l
// A ServerOption sets options. // A ServerOption sets options.
type ServerOption func(*options) type ServerOption func(*options)
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
return func(o *options) {
o.keepaliveParams = kp
}
}
// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
return func(o *options) {
o.keepalivePolicy = kep
}
}
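
A minimal sketch of the server-side counterparts; all values are illustrative assumptions:

package main

import (
    "log"
    "net"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/keepalive"
)

func main() {
    srv := grpc.NewServer(
        grpc.KeepaliveParams(keepalive.ServerParameters{
            MaxConnectionIdle: 5 * time.Minute,  // GoAway idle connections
            Time:              2 * time.Hour,    // ping an inactive client
            Timeout:           20 * time.Second, // wait for the ping ack
        }),
        grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
            MinTime:             time.Minute, // clients may not ping more often
            PermitWithoutStream: true,        // allow pings with no active RPCs
        }),
    )
    lis, err := net.Listen("tcp", "127.0.0.1:5557")
    if err != nil {
        log.Fatalf("listen: %v", err)
    }
    log.Fatal(srv.Serve(lis))
}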
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
func CustomCodec(codec Codec) ServerOption { func CustomCodec(codec Codec) ServerOption {
return func(o *options) { return func(o *options) {
@ -184,6 +209,42 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
} }
} }
// InTapHandle returns a ServerOption that sets the tap handle for all the server
// transports to be created. Only one can be installed.
func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) {
if o.inTapHandle != nil {
panic("The tap handle has been set.")
}
o.inTapHandle = h
}
}
// StatsHandler returns a ServerOption that sets the stats handler for the server.
func StatsHandler(h stats.Handler) ServerOption {
return func(o *options) {
o.statsHandler = h
}
}
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
// The handling function has full access to the Context of the request and the
// stream, and the invocation passes through interceptors.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
return func(o *options) {
o.unknownStreamDesc = &StreamDesc{
StreamName: "unknown_service_handler",
Handler: streamHandler,
// We need to assume that the users of the streamHandler will want to use both.
ClientStreams: true,
ServerStreams: true,
}
}
}
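
A minimal sketch of a catch-all handler; here it simply rejects, though a proxy could forward the raw stream instead. Names are illustrative:

package example

import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// fallback receives every RPC whose service or method is not registered,
// as a bidirectional stream.
func fallback(srv interface{}, stream grpc.ServerStream) error {
    return status.Errorf(codes.Unimplemented, "service/method not registered")
}

func newServer() *grpc.Server {
    return grpc.NewServer(grpc.UnknownServiceHandler(fallback))
}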
// NewServer creates a gRPC server which has no service registered and has not // NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet. // started to accept requests yet.
func NewServer(opt ...ServerOption) *Server { func NewServer(opt ...ServerOption) *Server {
@ -203,6 +264,7 @@ func NewServer(opt ...ServerOption) *Server {
m: make(map[string]*service), m: make(map[string]*service),
} }
s.cv = sync.NewCond(&s.mu) s.cv = sync.NewCond(&s.mu)
s.ctx, s.cancel = context.WithCancel(context.Background())
if EnableTracing { if EnableTracing {
_, file, line, _ := runtime.Caller(1) _, file, line, _ := runtime.Caller(1)
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
@ -324,8 +386,9 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
// Serve accepts incoming connections on the listener lis, creating a new // Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines // ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them. // read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails. lis will be closed when // Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns. // this method returns.
// Serve always returns a non-nil error.
func (s *Server) Serve(lis net.Listener) error { func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock() s.mu.Lock()
s.printf("serving") s.printf("serving")
@ -344,14 +407,38 @@ func (s *Server) Serve(lis net.Listener) error {
} }
s.mu.Unlock() s.mu.Unlock()
}() }()
var tempDelay time.Duration // how long to sleep on accept failure
for { for {
rawConn, err := lis.Accept() rawConn, err := lis.Accept()
if err != nil { if err != nil {
if ne, ok := err.(interface {
Temporary() bool
}); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
s.mu.Lock()
s.printf("Accept error: %v; retrying in %v", err, tempDelay)
s.mu.Unlock()
select {
case <-time.After(tempDelay):
case <-s.ctx.Done():
}
continue
}
s.mu.Lock() s.mu.Lock()
s.printf("done serving; Accept = %v", err) s.printf("done serving; Accept = %v", err)
s.mu.Unlock() s.mu.Unlock()
return err return err
} }
tempDelay = 0
// Start a new goroutine to deal with rawConn // Start a new goroutine to deal with rawConn
// so we don't stall this Accept loop goroutine. // so we don't stall this Accept loop goroutine.
go s.handleRawConn(rawConn) go s.handleRawConn(rawConn)
@ -385,17 +472,25 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
if s.opts.useHandlerImpl { if s.opts.useHandlerImpl {
s.serveUsingHandler(conn) s.serveUsingHandler(conn)
} else { } else {
s.serveNewHTTP2Transport(conn, authInfo) s.serveHTTP2Transport(conn, authInfo)
} }
} }
// serveNewHTTP2Transport sets up a new http/2 transport (using the // serveHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go) and // gRPC http2 server transport in transport/http2_server.go) and
// serves streams on it. // serves streams on it.
// This is run in its own goroutine (it does network I/O in // This is run in its own goroutine (it does network I/O in
// transport.NewServerTransport). // transport.NewServerTransport).
func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) config := &transport.ServerConfig{
MaxStreams: s.opts.maxConcurrentStreams,
AuthInfo: authInfo,
InTapHandle: s.opts.inTapHandle,
StatsHandler: s.opts.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
}
st, err := transport.NewServerTransport("http2", c, config)
if err != nil { if err != nil {
s.mu.Lock() s.mu.Lock()
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
@ -421,6 +516,12 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
defer wg.Done() defer wg.Done()
s.handleStream(st, stream, s.traceInfo(st, stream)) s.handleStream(st, stream, s.traceInfo(st, stream))
}() }()
}, func(ctx context.Context, method string) context.Context {
if !EnableTracing {
return ctx
}
tr := trace.New("grpc.Recv."+methodFamily(method), method)
return trace.NewContext(ctx, tr)
}) })
wg.Wait() wg.Wait()
} }
@ -470,15 +571,17 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. // traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
// If tracing is not enabled, it returns nil. // If tracing is not enabled, it returns nil.
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
if !EnableTracing { tr, ok := trace.FromContext(stream.Context())
if !ok {
return nil return nil
} }
trInfo = &traceInfo{ trInfo = &traceInfo{
tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), tr: tr,
} }
trInfo.firstLine.client = false trInfo.firstLine.client = false
trInfo.firstLine.remoteAddr = st.RemoteAddr() trInfo.firstLine.remoteAddr = st.RemoteAddr()
stream.TraceContext(trInfo.tr)
if dl, ok := stream.Context().Deadline(); ok { if dl, ok := stream.Context().Deadline(); ok {
trInfo.firstLine.deadline = dl.Sub(time.Now()) trInfo.firstLine.deadline = dl.Sub(time.Now())
} }
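For context: serveStreams (above) now creates the trace when the stream starts and stores it in the context, and traceInfo simply recovers it with trace.FromContext. The golang.org/x/net/trace pattern in isolation, with illustrative function names:

package main

import (
    "golang.org/x/net/context"
    "golang.org/x/net/trace"
)

// tagContext mirrors the callback passed to HandleStreams: one trace per
// stream, stashed in the context. (The real code derives the family via
// methodFamily(method).)
func tagContext(ctx context.Context, method string) context.Context {
    tr := trace.New("grpc.Recv."+method, method)
    return trace.NewContext(ctx, tr)
}

// handle mirrors the consumer side: recover the trace, log against it,
// and finish it when the RPC completes.
func handle(ctx context.Context) {
    tr, ok := trace.FromContext(ctx)
    if !ok {
        return // tracing disabled; traceInfo returns nil in this case
    }
    tr.LazyPrintf("handling stream")
    tr.Finish()
}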
@ -500,16 +603,22 @@ func (s *Server) removeConn(c io.Closer) {
defer s.mu.Unlock() defer s.mu.Unlock()
if s.conns != nil { if s.conns != nil {
delete(s.conns, c) delete(s.conns, c)
s.cv.Signal() s.cv.Broadcast()
} }
} }
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
var cbuf *bytes.Buffer var (
cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
if cp != nil { if cp != nil {
cbuf = new(bytes.Buffer) cbuf = new(bytes.Buffer)
} }
p, err := encode(s.opts.codec, msg, cp, cbuf) if s.opts.statsHandler != nil {
outPayload = &stats.OutPayload{}
}
p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
if err != nil { if err != nil {
// This typically indicates a fatal issue (e.g., memory // This typically indicates a fatal issue (e.g., memory
// corruption or hardware faults) the application program // corruption or hardware faults) the application program
@ -520,10 +629,33 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
// the optimal option. // the optimal option.
grpclog.Fatalf("grpc: Server failed to encode response %v", err) grpclog.Fatalf("grpc: Server failed to encode response %v", err)
} }
return t.Write(stream, p, opts) err = t.Write(stream, p, opts)
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
s.opts.statsHandler.HandleRPC(stream.Context(), outPayload)
}
return err
} }
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
sh := s.opts.statsHandler
if sh != nil {
begin := &stats.Begin{
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
}
defer func() {
if sh != nil {
end := &stats.End{
EndTime: time.Now(),
}
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
}
}()
if trInfo != nil { if trInfo != nil {
defer trInfo.tr.Finish() defer trInfo.tr.Finish()
trInfo.firstLine.client = false trInfo.firstLine.client = false
@ -540,7 +672,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
stream.SetSendCompress(s.opts.cp.Type()) stream.SetSendCompress(s.opts.cp.Type())
} }
p := &parser{r: stream} p := &parser{r: stream}
for { for { // TODO: delete
pf, req, err := p.recvMsg(s.opts.maxMsgSize) pf, req, err := p.recvMsg(s.opts.maxMsgSize)
if err == io.EOF { if err == io.EOF {
// The entire stream is done (for unary RPC only). // The entire stream is done (for unary RPC only).
@ -550,58 +682,68 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
} }
if err != nil { if err != nil {
switch err := err.(type) { if st, ok := status.FromError(err); ok {
case *rpcError: if e := t.WriteStatus(stream, st); e != nil {
if err := t.WriteStatus(stream, err.code, err.desc); err != nil { grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
} }
case transport.ConnectionError: } else {
// Nothing to do here. switch st := err.(type) {
case transport.StreamError: case transport.ConnectionError:
if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { // Nothing to do here.
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) case transport.StreamError:
if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
} }
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
} }
return err return err
} }
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
switch err := err.(type) { if st, ok := status.FromError(err); ok {
case *rpcError: if e := t.WriteStatus(stream, st); e != nil {
if err := t.WriteStatus(stream, err.code, err.desc); err != nil { grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
} }
default: return err
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { }
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
} grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
// TODO checkRecvPayload always returns an RPC error. Add a return here if necessary.
}
var inPayload *stats.InPayload
if sh != nil {
inPayload = &stats.InPayload{
RecvTime: time.Now(),
} }
return err
} }
statusCode := codes.OK
statusDesc := ""
df := func(v interface{}) error { df := func(v interface{}) error {
if inPayload != nil {
inPayload.WireLength = len(req)
}
if pf == compressionMade { if pf == compressionMade {
var err error var err error
req, err = s.opts.dc.Do(bytes.NewReader(req)) req, err = s.opts.dc.Do(bytes.NewReader(req))
if err != nil { if err != nil {
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { return Errorf(codes.Internal, err.Error())
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
}
return err
} }
} }
if len(req) > s.opts.maxMsgSize { if len(req) > s.opts.maxMsgSize {
// TODO: Revisit the error code. Currently keep it consistent with // TODO: Revisit the error code. Currently keep it consistent with
// java implementation. // java implementation.
statusCode = codes.Internal return status.Errorf(codes.Internal, "grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
} }
if err := s.opts.codec.Unmarshal(req, v); err != nil { if err := s.opts.codec.Unmarshal(req, v); err != nil {
return err return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
if inPayload != nil {
inPayload.Payload = v
inPayload.Data = req
inPayload.Length = len(req)
sh.HandleRPC(stream.Context(), inPayload)
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
@ -610,22 +752,20 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
} }
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
if appErr != nil { if appErr != nil {
if err, ok := appErr.(*rpcError); ok { appStatus, ok := status.FromError(appErr)
statusCode = err.code if !ok {
statusDesc = err.desc // Convert appErr if it is not a grpc status error.
} else { appErr = status.Error(convertCode(appErr), appErr.Error())
statusCode = convertCode(appErr) appStatus, _ = status.FromError(appErr)
statusDesc = appErr.Error()
} }
if trInfo != nil && statusCode != codes.OK { if trInfo != nil {
trInfo.tr.LazyLog(stringer(statusDesc), true) trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { if e := t.WriteStatus(stream, appStatus); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e)
return err
} }
return nil return appErr
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false) trInfo.tr.LazyLog(stringer("OK"), false)
@ -635,38 +775,70 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Delay: false, Delay: false,
} }
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
switch err := err.(type) { if err == io.EOF {
case transport.ConnectionError: // The entire stream is done (for unary RPC only).
// Nothing to do here. return err
case transport.StreamError: }
statusCode = err.Code if s, ok := status.FromError(err); ok {
statusDesc = err.Desc if e := t.WriteStatus(stream, s); e != nil {
default: grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e)
statusCode = codes.Unknown }
statusDesc = err.Error() } else {
switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
}
} }
return err return err
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
} }
return t.WriteStatus(stream, statusCode, statusDesc) // TODO: Should we be logging if writing status failed here, like above?
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it?
return t.WriteStatus(stream, status.New(codes.OK, ""))
} }
} }
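The practical effect of the status conversion above: a handler can return a status error and its code and message reach the client verbatim, while any other error is mapped through convertCode and wrapped the same way. A hedged sketch with hypothetical request and reply types:

package main

import (
    "golang.org/x/net/context"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// Hypothetical message types, for illustration only.
type echoRequest struct{ Msg string }
type echoReply struct{ Msg string }

// echo is what a unary handler looks like under the new error contract.
func echo(ctx context.Context, req *echoRequest) (*echoReply, error) {
    if req.Msg == "" {
        // Reaches the client as codes.InvalidArgument, not codes.Unknown.
        return nil, status.Errorf(codes.InvalidArgument, "msg must not be empty")
    }
    return &echoReply{Msg: req.Msg}, nil
}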
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
sh := s.opts.statsHandler
if sh != nil {
begin := &stats.Begin{
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
}
defer func() {
if sh != nil {
end := &stats.End{
EndTime: time.Now(),
}
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
}
}()
if s.opts.cp != nil { if s.opts.cp != nil {
stream.SetSendCompress(s.opts.cp.Type()) stream.SetSendCompress(s.opts.cp.Type())
} }
ss := &serverStream{ ss := &serverStream{
t: t, t: t,
s: stream, s: stream,
p: &parser{r: stream}, p: &parser{r: stream},
codec: s.opts.codec, codec: s.opts.codec,
cp: s.opts.cp, cp: s.opts.cp,
dc: s.opts.dc, dc: s.opts.dc,
maxMsgSize: s.opts.maxMsgSize, maxMsgSize: s.opts.maxMsgSize,
trInfo: trInfo, trInfo: trInfo,
statsHandler: sh,
} }
if ss.cp != nil { if ss.cp != nil {
ss.cbuf = new(bytes.Buffer) ss.cbuf = new(bytes.Buffer)
@ -685,39 +857,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
}() }()
} }
var appErr error var appErr error
var server interface{}
if srv != nil {
server = srv.server
}
if s.opts.streamInt == nil { if s.opts.streamInt == nil {
appErr = sd.Handler(srv.server, ss) appErr = sd.Handler(server, ss)
} else { } else {
info := &StreamServerInfo{ info := &StreamServerInfo{
FullMethod: stream.Method(), FullMethod: stream.Method(),
IsClientStream: sd.ClientStreams, IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams, IsServerStream: sd.ServerStreams,
} }
appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) appErr = s.opts.streamInt(server, ss, info, sd.Handler)
} }
if appErr != nil { if appErr != nil {
if err, ok := appErr.(*rpcError); ok { appStatus, ok := status.FromError(appErr)
ss.statusCode = err.code if !ok {
ss.statusDesc = err.desc switch err := appErr.(type) {
} else if err, ok := appErr.(transport.StreamError); ok { case transport.StreamError:
ss.statusCode = err.Code appStatus = status.New(err.Code, err.Desc)
ss.statusDesc = err.Desc default:
} else { appStatus = status.New(convertCode(appErr), appErr.Error())
ss.statusCode = convertCode(appErr) }
ss.statusDesc = appErr.Error() appErr = appStatus.Err()
} }
if trInfo != nil {
ss.mu.Lock()
ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
ss.trInfo.tr.SetError()
ss.mu.Unlock()
}
t.WriteStatus(ss.s, appStatus)
// TODO: Should we log an error from WriteStatus here and below?
return appErr
} }
if trInfo != nil { if trInfo != nil {
ss.mu.Lock() ss.mu.Lock()
if ss.statusCode != codes.OK { ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
ss.trInfo.tr.SetError()
} else {
ss.trInfo.tr.LazyLog(stringer("OK"), false)
}
ss.mu.Unlock() ss.mu.Unlock()
} }
return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) return t.WriteStatus(ss.s, status.New(codes.OK, ""))
} }
@ -732,7 +912,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
if err := t.WriteStatus(stream, status.New(codes.InvalidArgument, errDesc)); err != nil {
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
@ -748,11 +929,16 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
method := sm[pos+1:] method := sm[pos+1:]
srv, ok := s.m[service] srv, ok := s.m[service]
if !ok { if !ok {
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
return
}
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { errDesc := fmt.Sprintf("unknown service %v", service)
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
@ -777,7 +963,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
return
}
errDesc := fmt.Sprintf("unknown method %v", method)
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
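The unknownStreamDesc fallback pairs with a server option (upstream calls it grpc.UnknownServiceHandler; assumed present at this revision) that receives every unmatched service or method as a raw server stream instead of an automatic Unimplemented status. A minimal sketch:

package main

import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// rejectUnknown is a catch-all grpc.StreamHandler. A real use would be a
// proxy that forwards raw frames; this one just customizes the status.
func rejectUnknown(srv interface{}, stream grpc.ServerStream) error {
    return status.Error(codes.Unimplemented, "this server forwards no unknown methods")
}

func newCatchAllServer() *grpc.Server {
    return grpc.NewServer(grpc.UnknownServiceHandler(rejectUnknown))
}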
@ -801,7 +992,7 @@ func (s *Server) Stop() {
st := s.conns st := s.conns
s.conns = nil s.conns = nil
// interrupt GracefulStop if Stop and GracefulStop are called concurrently. // interrupt GracefulStop if Stop and GracefulStop are called concurrently.
s.cv.Signal() s.cv.Broadcast()
s.mu.Unlock() s.mu.Unlock()
for lis := range listeners { for lis := range listeners {
@ -812,6 +1003,7 @@ func (s *Server) Stop() {
} }
s.mu.Lock() s.mu.Lock()
s.cancel()
if s.events != nil { if s.events != nil {
s.events.Finish() s.events.Finish()
s.events = nil s.events = nil
@ -824,16 +1016,19 @@ func (s *Server) Stop() {
func (s *Server) GracefulStop() { func (s *Server) GracefulStop() {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
if s.drain == true || s.conns == nil { if s.conns == nil {
return return
} }
s.drain = true
for lis := range s.lis { for lis := range s.lis {
lis.Close() lis.Close()
} }
s.lis = nil s.lis = nil
for c := range s.conns { s.cancel()
c.(transport.ServerTransport).Drain() if !s.drain {
for c := range s.conns {
c.(transport.ServerTransport).Drain()
}
s.drain = true
} }
for len(s.conns) != 0 { for len(s.conns) != 0 {
s.cv.Wait() s.cv.Wait()
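Usage-wise: GracefulStop now closes the listeners, drains each open transport exactly once, and waits on the condition variable until the last connection is removed (or a concurrent Stop broadcasts). A minimal sketch of the intended call pattern:

package main

import (
    "net"
    "os"
    "os/signal"

    "google.golang.org/grpc"
)

func run(lis net.Listener) error {
    srv := grpc.NewServer()
    go func() {
        sig := make(chan os.Signal, 1)
        signal.Notify(sig, os.Interrupt)
        <-sig
        srv.GracefulStop() // blocks until in-flight RPCs finish
    }()
    return srv.Serve(lis) // returns once GracefulStop closes the listener
}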
@ -865,12 +1060,26 @@ func (s *Server) testingCloseConns() {
s.mu.Unlock() s.mu.Unlock()
} }
// SendHeader sends header metadata. It may be called at most once from a unary // SetHeader sets the header metadata.
// RPC handler. The ctx is the RPC handler's Context or one derived from it. // When called multiple times, all the provided metadata will be merged.
func SendHeader(ctx context.Context, md metadata.MD) error { // All the metadata will be sent out when one of the following happens:
// - grpc.SendHeader() is called;
// - The first response is sent out;
// - An RPC status is sent out (error or success).
func SetHeader(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 { if md.Len() == 0 {
return nil return nil
} }
stream, ok := transport.StreamFromContext(ctx)
if !ok {
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
return stream.SetHeader(md)
}
// SendHeader sends header metadata. It may be called at most once.
// The provided md and headers set by SetHeader() will be sent.
func SendHeader(ctx context.Context, md metadata.MD) error {
stream, ok := transport.StreamFromContext(ctx) stream, ok := transport.StreamFromContext(ctx)
if !ok { if !ok {
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
@ -887,7 +1096,6 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
// SetTrailer sets the trailer metadata that will be sent when an RPC returns. // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
// When called more than once, all the provided metadata will be merged. // When called more than once, all the provided metadata will be merged.
// The ctx is the RPC handler's Context or one derived from it.
func SetTrailer(ctx context.Context, md metadata.MD) error { func SetTrailer(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 { if md.Len() == 0 {
return nil return nil
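Taken together, the new header flow from a unary handler looks like this: SetHeader buffers and merges metadata until SendHeader, the first response, or the RPC status flushes it. A sketch (the metadata keys are illustrative):

package main

import (
    "golang.org/x/net/context"

    "google.golang.org/grpc"
    "google.golang.org/grpc/metadata"
)

func annotate(ctx context.Context) error {
    if err := grpc.SetHeader(ctx, metadata.Pairs("stage", "validated")); err != nil {
        return err
    }
    if err := grpc.SetHeader(ctx, metadata.Pairs("region", "us-east")); err != nil {
        return err // merged with the pair above, not overwritten
    }
    if err := grpc.SendHeader(ctx, nil); err != nil {
        return err // optional: force the merged headers out now
    }
    return grpc.SetTrailer(ctx, metadata.Pairs("cost", "low"))
}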

vendor/google.golang.org/grpc/stats/handlers.go
@ -0,0 +1,76 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package stats
import (
"net"
"golang.org/x/net/context"
)
// ConnTagInfo defines the relevant information needed by the connection context tagger.
type ConnTagInfo struct {
// RemoteAddr is the remote address of the corresponding connection.
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
// TODO add QOS related fields.
}
// RPCTagInfo defines the relevant information needed by the RPC context tagger.
type RPCTagInfo struct {
// FullMethodName is the RPC method in the format of /package.service/method.
FullMethodName string
}
// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
type Handler interface {
// TagRPC can attach some information to the given context.
// The returned context is used for the rest of the lifetime of the RPC.
TagRPC(context.Context, *RPCTagInfo) context.Context
// HandleRPC processes the RPC stats.
HandleRPC(context.Context, RPCStats)
// TagConn can attach some information to the given context.
// The returned context will be used for stats handling.
// For conn stats handling, the context used in HandleConn for this
// connection will be derived from the context returned.
// For RPC stats handling,
// - On server side, the context used in HandleRPC for all RPCs on this
// connection will be derived from the context returned.
// - On client side, the context is not derived from the context returned.
TagConn(context.Context, *ConnTagInfo) context.Context
// HandleConn processes the Conn stats.
HandleConn(context.Context, ConnStats)
}
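A minimal type satisfying this interface, for illustration: a no-op tagger that logs RPC lifecycle events.

package main

import (
    "log"

    "golang.org/x/net/context"

    "google.golang.org/grpc/stats"
)

type logHandler struct{}

func (logHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    return ctx // a real handler might attach a per-RPC tag here
}

func (logHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
    switch s := s.(type) {
    case *stats.Begin:
        log.Printf("rpc begin at %v (client=%v)", s.BeginTime, s.IsClient())
    case *stats.End:
        log.Printf("rpc end, err=%v", s.Error)
    }
}

func (logHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
    return ctx
}

func (logHandler) HandleConn(ctx context.Context, s stats.ConnStats) {}

At this revision such a handler is wired in through the statsHandler fields shown above, via the grpc.StatsHandler server option or the grpc.WithStatsHandler dial option (option names assumed from the plumbing in this diff).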

vendor/google.golang.org/grpc/stats/stats.go
@ -0,0 +1,223 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package stats is for collecting and reporting various network and RPC stats.
// This package is for monitoring purposes only. All fields are read-only.
// All APIs are experimental.
package stats // import "google.golang.org/grpc/stats"
import (
"net"
"time"
)
// RPCStats contains stats information about RPCs.
type RPCStats interface {
isRPCStats()
// IsClient returns true if this RPCStats is from client side.
IsClient() bool
}
// Begin contains stats when an RPC begins.
// FailFast is only valid if Client is true.
type Begin struct {
// Client is true if this Begin is from client side.
Client bool
// BeginTime is the time when the RPC begins.
BeginTime time.Time
// FailFast indicates if this RPC is failfast.
FailFast bool
}
// IsClient indicates if this is from client side.
func (s *Begin) IsClient() bool { return s.Client }
func (s *Begin) isRPCStats() {}
// InPayload contains the information for an incoming payload.
type InPayload struct {
// Client is true if this InPayload is from client side.
Client bool
// Payload is the payload with original type.
Payload interface{}
// Data is the serialized message payload.
Data []byte
// Length is the length of uncompressed data.
Length int
// WireLength is the length of data on wire (compressed, signed, encrypted).
WireLength int
// RecvTime is the time when the payload is received.
RecvTime time.Time
}
// IsClient indicates if this is from client side.
func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {}
// InHeader contains stats when a header is received.
// FullMethod, addresses and Compression are only valid if Client is false.
type InHeader struct {
// Client is true if this InHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
// Compression is the compression algorithm used for the RPC.
Compression string
}
// IsClient indicates if this is from client side.
func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {}
// InTrailer contains stats when a trailer is received.
type InTrailer struct {
// Client is true if this InTrailer is from client side.
Client bool
// WireLength is the wire length of trailer.
WireLength int
}
// IsClient indicates if this is from client side.
func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {}
// OutPayload contains the information for an outgoing payload.
type OutPayload struct {
// Client is true if this OutPayload is from client side.
Client bool
// Payload is the payload with original type.
Payload interface{}
// Data is the serialized message payload.
Data []byte
// Length is the length of uncompressed data.
Length int
// WireLength is the length of data on wire (compressed, signed, encrypted).
WireLength int
// SentTime is the time when the payload is sent.
SentTime time.Time
}
// IsClient indicates if this is from client side.
func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {}
// OutHeader contains stats when a header is sent.
// FullMethod, addresses and Compression are only valid if Client is true.
type OutHeader struct {
// Client is true if this OutHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
// Compression is the compression algorithm used for the RPC.
Compression string
}
// IsClient indicates if this is from client side.
func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {}
// OutTrailer contains stats when a trailer is sent.
type OutTrailer struct {
// Client is true if this OutTrailer is from client side.
Client bool
// WireLength is the wire length of trailer.
WireLength int
}
// IsClient indicates if this is from client side.
func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {}
// End contains stats when an RPC ends.
type End struct {
// Client is true if this End is from client side.
Client bool
// EndTime is the time when the RPC ends.
EndTime time.Time
// Error is the error just happened. It implements status.Status if non-nil.
Error error
}
// IsClient indicates if this is from client side.
func (s *End) IsClient() bool { return s.Client }
func (s *End) isRPCStats() {}
// ConnStats contains stats information about connections.
type ConnStats interface {
isConnStats()
// IsClient returns true if this ConnStats is from client side.
IsClient() bool
}
// ConnBegin contains the stats of a connection when it is established.
type ConnBegin struct {
// Client is true if this ConnBegin is from client side.
Client bool
}
// IsClient indicates if this is from client side.
func (s *ConnBegin) IsClient() bool { return s.Client }
func (s *ConnBegin) isConnStats() {}
// ConnEnd contains the stats of a connection when it ends.
type ConnEnd struct {
// Client is true if this ConnEnd is from client side.
Client bool
}
// IsClient indicates if this is from client side.
func (s *ConnEnd) IsClient() bool { return s.Client }
func (s *ConnEnd) isConnStats() {}

vendor/google.golang.org/grpc/status/status.go
@ -0,0 +1,136 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package status implements errors returned by gRPC. These errors are
// serialized and transmitted on the wire between server and client, and allow
// for additional data to be transmitted via the Details field in the status
// proto. gRPC service handlers should return an error created by this
// package, and gRPC clients should expect a corresponding error to be
// returned from the RPC call.
//
// This package upholds the invariants that a non-nil error may not
// contain an OK code, and an OK code must result in a nil error.
package status
import (
"fmt"
"github.com/golang/protobuf/proto"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
)
// statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package.
type statusError spb.Status
func (se *statusError) Error() string {
p := (*spb.Status)(se)
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
}
func (se *statusError) status() *Status {
return &Status{s: (*spb.Status)(se)}
}
// Status represents an RPC status code, message, and details. It is immutable
// and should be created with New, Newf, or FromProto.
type Status struct {
s *spb.Status
}
// Code returns the status code contained in s.
func (s *Status) Code() codes.Code {
return codes.Code(s.s.Code)
}
// Message returns the message contained in s.
func (s *Status) Message() string {
return s.s.Message
}
// Proto returns s's status as an spb.Status proto message.
func (s *Status) Proto() *spb.Status {
return proto.Clone(s.s).(*spb.Status)
}
// Err returns an immutable error representing s; returns nil if s.Code() is
// OK.
func (s *Status) Err() error {
if s.Code() == codes.OK {
return nil
}
return (*statusError)(s.s)
}
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
}
// Newf returns New(c, fmt.Sprintf(format, a...)).
func Newf(c codes.Code, format string, a ...interface{}) *Status {
return New(c, fmt.Sprintf(format, a...))
}
// Error returns an error representing c and msg. If c is OK, returns nil.
func Error(c codes.Code, msg string) error {
return New(c, msg).Err()
}
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
func Errorf(c codes.Code, format string, a ...interface{}) error {
return Error(c, fmt.Sprintf(format, a...))
}
// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
func ErrorProto(s *spb.Status) error {
return FromProto(s).Err()
}
// FromProto returns a Status representing s.
func FromProto(s *spb.Status) *Status {
return &Status{s: proto.Clone(s).(*spb.Status)}
}
// FromError returns a Status representing err if it was produced from this
// package, otherwise it returns nil, false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
}
if s, ok := err.(*statusError); ok {
return s.status(), true
}
return nil, false
}
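A short usage sketch matching the package invariant (an OK code never yields a non-nil error):

package main

import (
    "log"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func classify(err error) codes.Code {
    if err == nil {
        return codes.OK // status.FromError would also report OK here
    }
    if st, ok := status.FromError(err); ok {
        log.Printf("rpc failed: code=%s msg=%q", st.Code(), st.Message())
        return st.Code()
    }
    return codes.Unknown // err was not produced by this package
}

func main() {
    err := status.Errorf(codes.NotFound, "no user %q", "jane")
    _ = classify(err) // logs and returns codes.NotFound
}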

@ -37,7 +37,6 @@ import (
"bytes" "bytes"
"errors" "errors"
"io" "io"
"math"
"sync" "sync"
"time" "time"
@ -45,6 +44,8 @@ import (
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
@ -97,7 +98,7 @@ type ClientStream interface {
// NewClientStream creates a new Stream for the client side. This is called // NewClientStream creates a new Stream for the client side. This is called
// by generated code. // by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
if cc.dopts.streamInt != nil { if cc.dopts.streamInt != nil {
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
} }
@ -106,11 +107,18 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
var ( var (
t transport.ClientTransport t transport.ClientTransport
s *transport.Stream s *transport.Stream
put func() put func()
cancel context.CancelFunc
) )
c := defaultCallInfo c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok {
c.failFast = !mc.WaitForReady
if mc.Timeout > 0 {
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
}
}
for _, o := range opts { for _, o := range opts {
if err := o.before(&c); err != nil { if err := o.before(&c); err != nil {
return nil, toRPCErr(err) return nil, toRPCErr(err)
@ -143,6 +151,26 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
} }
}() }()
} }
sh := cc.dopts.copts.StatsHandler
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
}
defer func() {
if err != nil && sh != nil {
// Only handle end stats if err != nil.
end := &stats.End{
Client: true,
Error: err,
}
sh.HandleRPC(ctx, end)
}
}()
gopts := BalancerGetOptions{ gopts := BalancerGetOptions{
BlockingWait: !c.failFast, BlockingWait: !c.failFast,
} }
@ -150,7 +178,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
t, put, err = cc.getTransport(ctx, gopts) t, put, err = cc.getTransport(ctx, gopts)
if err != nil { if err != nil {
// TODO(zhaoq): Probably revisit the error handling. // TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok { if _, ok := status.FromError(err); ok {
return nil, err return nil, err
} }
if err == errConnClosing || err == errConnUnavailable { if err == errConnClosing || err == errConnUnavailable {
@ -180,12 +208,14 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
break break
} }
cs := &clientStream{ cs := &clientStream{
opts: opts, opts: opts,
c: c, c: c,
desc: desc, desc: desc,
codec: cc.dopts.codec, codec: cc.dopts.codec,
cp: cc.dopts.cp, cp: cc.dopts.cp,
dc: cc.dopts.dc, dc: cc.dopts.dc,
maxMsgSize: cc.dopts.maxMsgSize,
cancel: cancel,
put: put, put: put,
t: t, t: t,
@ -194,6 +224,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
tracing: EnableTracing, tracing: EnableTracing,
trInfo: trInfo, trInfo: trInfo,
statsCtx: ctx,
statsHandler: cc.dopts.copts.StatsHandler,
} }
if cc.dopts.cp != nil { if cc.dopts.cp != nil {
cs.cbuf = new(bytes.Buffer) cs.cbuf = new(bytes.Buffer)
@ -207,11 +240,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
case <-s.Done(): case <-s.Done():
// TODO: The trace of the RPC is terminated here when there is no pending // TODO: The trace of the RPC is terminated here when there is no pending
// I/O, which is probably not the optimal solution. // I/O, which is probably not the optimal solution.
if s.StatusCode() == codes.OK { cs.finish(s.Status().Err())
cs.finish(nil)
} else {
cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
}
cs.closeTransportStream(nil) cs.closeTransportStream(nil)
case <-s.GoAway(): case <-s.GoAway():
cs.finish(errConnDrain) cs.finish(errConnDrain)
@ -227,25 +256,34 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream. // clientStream implements a client side Stream.
type clientStream struct { type clientStream struct {
opts []CallOption opts []CallOption
c callInfo c callInfo
t transport.ClientTransport t transport.ClientTransport
s *transport.Stream s *transport.Stream
p *parser p *parser
desc *StreamDesc desc *StreamDesc
codec Codec codec Codec
cp Compressor cp Compressor
cbuf *bytes.Buffer cbuf *bytes.Buffer
dc Decompressor dc Decompressor
maxMsgSize int
cancel context.CancelFunc
tracing bool // set to EnableTracing when the clientStream is created. tracing bool // set to EnableTracing when the clientStream is created.
mu sync.Mutex mu sync.Mutex
put func() put func()
closed bool closed bool
finished bool
// trInfo.tr is set when the clientStream is created (if EnableTracing is true), // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
// and is set to nil when the clientStream's finish method is called. // and is set to nil when the clientStream's finish method is called.
trInfo traceInfo trInfo traceInfo
// statsCtx keeps the user context for stats handling.
// All stats collection should use the statsCtx (instead of the stream context)
// so that all the generated stats for a particular RPC can be associated in the processing phase.
statsCtx context.Context
statsHandler stats.Handler
} }
func (cs *clientStream) Context() context.Context { func (cs *clientStream) Context() context.Context {
@ -274,6 +312,8 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
} }
cs.mu.Unlock() cs.mu.Unlock()
} }
// TODO Investigate how to signal the stats handling party.
// generate error stats if err != nil && err != io.EOF?
defer func() { defer func() {
if err != nil { if err != nil {
cs.finish(err) cs.finish(err)
@ -296,7 +336,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
} }
err = toRPCErr(err) err = toRPCErr(err)
}() }()
out, err := encode(cs.codec, m, cs.cp, cs.cbuf) var outPayload *stats.OutPayload
if cs.statsHandler != nil {
outPayload = &stats.OutPayload{
Client: true,
}
}
out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
defer func() { defer func() {
if cs.cbuf != nil { if cs.cbuf != nil {
cs.cbuf.Reset() cs.cbuf.Reset()
@ -305,11 +351,22 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
if err != nil { if err != nil {
return Errorf(codes.Internal, "grpc: %v", err) return Errorf(codes.Internal, "grpc: %v", err)
} }
return cs.t.Write(cs.s, out, &transport.Options{Last: false}) err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
}
return err
} }
func (cs *clientStream) RecvMsg(m interface{}) (err error) { func (cs *clientStream) RecvMsg(m interface{}) (err error) {
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) var inPayload *stats.InPayload
if cs.statsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload)
defer func() { defer func() {
// err != nil indicates the termination of the stream. // err != nil indicates the termination of the stream.
if err != nil { if err != nil {
@ -324,21 +381,25 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
} }
cs.mu.Unlock() cs.mu.Unlock()
} }
if inPayload != nil {
cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
}
if !cs.desc.ClientStreams || cs.desc.ServerStreams { if !cs.desc.ClientStreams || cs.desc.ServerStreams {
return return
} }
// Special handling for client streaming rpc. // Special handling for client streaming rpc.
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) // This recv expects EOF or errors, so we don't collect inPayload.
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil)
cs.closeTransportStream(err) cs.closeTransportStream(err)
if err == nil { if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
} }
if err == io.EOF { if err == io.EOF {
if cs.s.StatusCode() == codes.OK { if se := cs.s.Status().Err(); se != nil {
cs.finish(err) return se
return nil
} }
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) cs.finish(err)
return nil
} }
return toRPCErr(err) return toRPCErr(err)
} }
@ -346,11 +407,11 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
cs.closeTransportStream(err) cs.closeTransportStream(err)
} }
if err == io.EOF { if err == io.EOF {
if cs.s.StatusCode() == codes.OK { if statusErr := cs.s.Status().Err(); statusErr != nil {
// Returns io.EOF to indicate the end of the stream. return statusErr
return
} }
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) // Returns io.EOF to indicate the end of the stream.
return
} }
return toRPCErr(err) return toRPCErr(err)
} }
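For callers, the contract above means io.EOF is the clean end of a stream (the server wrote codes.OK) and any non-OK status surfaces as a status error. A sketch, with recv standing in for a generated stream's Recv method:

package main

import (
    "io"
    "log"

    "google.golang.org/grpc/status"
)

func recvAll(recv func() (string, error)) error {
    for {
        msg, err := recv()
        if err == io.EOF {
            return nil // stream finished with codes.OK
        }
        if err != nil {
            if st, ok := status.FromError(err); ok {
                log.Printf("stream aborted: %s: %s", st.Code(), st.Message())
            }
            return err
        }
        log.Println("got", msg)
    }
}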
@ -386,6 +447,15 @@ func (cs *clientStream) closeTransportStream(err error) {
func (cs *clientStream) finish(err error) { func (cs *clientStream) finish(err error) {
cs.mu.Lock() cs.mu.Lock()
defer cs.mu.Unlock() defer cs.mu.Unlock()
if cs.finished {
return
}
cs.finished = true
defer func() {
if cs.cancel != nil {
cs.cancel()
}
}()
for _, o := range cs.opts { for _, o := range cs.opts {
o.after(&cs.c) o.after(&cs.c)
} }
@ -393,6 +463,17 @@ func (cs *clientStream) finish(err error) {
cs.put() cs.put()
cs.put = nil cs.put = nil
} }
if cs.statsHandler != nil {
end := &stats.End{
Client: true,
EndTime: time.Now(),
}
if err != io.EOF {
// end.Error is nil if the RPC finished successfully.
end.Error = toRPCErr(err)
}
cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
if !cs.tracing { if !cs.tracing {
return return
} }
@ -410,9 +491,16 @@ func (cs *clientStream) finish(err error) {
// ServerStream defines the interface a server stream has to satisfy. // ServerStream defines the interface a server stream has to satisfy.
type ServerStream interface { type ServerStream interface {
// SendHeader sends the header metadata. It should not be called // SetHeader sets the header metadata. It may be called multiple times.
// after SendProto. It fails if called multiple times or if // When called multiple times, all the provided metadata will be merged.
// called after SendProto. // All the metadata will be sent out when one of the following happens:
// - ServerStream.SendHeader() is called;
// - The first response is sent out;
// - An RPC status is sent out (error or success).
SetHeader(metadata.MD) error
// SendHeader sends the header metadata.
// The provided md and headers set by SetHeader() will be sent.
// It fails if called multiple times.
SendHeader(metadata.MD) error SendHeader(metadata.MD) error
// SetTrailer sets the trailer metadata which will be sent with the RPC status. // SetTrailer sets the trailer metadata which will be sent with the RPC status.
// When called more than once, all the provided metadata will be merged. // When called more than once, all the provided metadata will be merged.
@ -430,10 +518,10 @@ type serverStream struct {
dc Decompressor dc Decompressor
cbuf *bytes.Buffer cbuf *bytes.Buffer
maxMsgSize int maxMsgSize int
statusCode codes.Code
statusDesc string
trInfo *traceInfo trInfo *traceInfo
statsHandler stats.Handler
mu sync.Mutex // protects trInfo.tr after the service handler runs. mu sync.Mutex // protects trInfo.tr after the service handler runs.
} }
@ -441,6 +529,13 @@ func (ss *serverStream) Context() context.Context {
return ss.s.Context() return ss.s.Context()
} }
func (ss *serverStream) SetHeader(md metadata.MD) error {
if md.Len() == 0 {
return nil
}
return ss.s.SetHeader(md)
}
func (ss *serverStream) SendHeader(md metadata.MD) error { func (ss *serverStream) SendHeader(md metadata.MD) error {
return ss.t.WriteHeader(ss.s, md) return ss.t.WriteHeader(ss.s, md)
} }
@ -468,7 +563,11 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
ss.mu.Unlock() ss.mu.Unlock()
} }
}() }()
out, err := encode(ss.codec, m, ss.cp, ss.cbuf) var outPayload *stats.OutPayload
if ss.statsHandler != nil {
outPayload = &stats.OutPayload{}
}
out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
defer func() { defer func() {
if ss.cbuf != nil { if ss.cbuf != nil {
ss.cbuf.Reset() ss.cbuf.Reset()
@ -481,6 +580,10 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
return toRPCErr(err) return toRPCErr(err)
} }
if outPayload != nil {
outPayload.SentTime = time.Now()
ss.statsHandler.HandleRPC(ss.s.Context(), outPayload)
}
return nil return nil
} }
@ -499,7 +602,11 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
ss.mu.Unlock() ss.mu.Unlock()
} }
}() }()
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize); err != nil { var inPayload *stats.InPayload
if ss.statsHandler != nil {
inPayload = &stats.InPayload{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
if err == io.EOF { if err == io.EOF {
return err return err
} }
@ -508,5 +615,8 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
} }
return toRPCErr(err) return toRPCErr(err)
} }
if inPayload != nil {
ss.statsHandler.HandleRPC(ss.s.Context(), inPayload)
}
return nil return nil
} }

vendor/google.golang.org/grpc/tap/tap.go
@ -0,0 +1,54 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package tap defines the function handles which are executed on the transport
// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL.
package tap
import (
"golang.org/x/net/context"
)
// Info defines the relevant information needed by the handles.
type Info struct {
// FullMethodName is the full RPC method string (in the format of
// /package.service/method).
FullMethodName string
// TODO: More to be added.
}
// ServerInHandle defines the function which runs when a new stream is created
// on the server side. Note that it is executed in the per-connection I/O goroutine(s), not in a
// per-RPC goroutine. Therefore, users should NOT do any blocking or time-consuming
// work in this handle; otherwise all the RPCs on the connection would slow down.
type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
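A sketch of a ServerInHandle, assuming it is installed through the inTapHandle field threaded into ServerConfig above (upstream exposes this as an experimental grpc.InTapHandle server option). Because it runs on the connection's I/O goroutine, it only does a map lookup under a mutex:

package main

import (
    "errors"
    "sync"

    "golang.org/x/net/context"

    "google.golang.org/grpc/tap"
)

type limiter struct {
    mu    sync.Mutex
    seen  map[string]int
    quota int
}

func newLimiter(quota int) *limiter {
    return &limiter{seen: make(map[string]int), quota: quota}
}

// handle satisfies tap.ServerInHandle: returning an error refuses the
// stream before a per-RPC goroutine is ever started.
func (l *limiter) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
    l.mu.Lock()
    defer l.mu.Unlock()
    l.seen[info.FullMethodName]++
    if l.seen[info.FullMethodName] > l.quota {
        return nil, errors.New("stream quota exceeded for " + info.FullMethodName)
    }
    return ctx, nil
}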

@ -35,7 +35,9 @@ package transport
import ( import (
"fmt" "fmt"
"math"
"sync" "sync"
"time"
"golang.org/x/net/http2" "golang.org/x/net/http2"
) )
@ -44,8 +46,18 @@ const (
// The default value of flow control window size in HTTP2 spec. // The default value of flow control window size in HTTP2 spec.
defaultWindowSize = 65535 defaultWindowSize = 65535
// The initial window size for flow control. // The initial window size for flow control.
initialWindowSize = defaultWindowSize // for an RPC initialWindowSize = defaultWindowSize // for an RPC
initialConnWindowSize = defaultWindowSize * 16 // for a connection initialConnWindowSize = defaultWindowSize * 16 // for a connection
infinity = time.Duration(math.MaxInt64)
defaultClientKeepaliveTime = infinity
defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
defaultMaxStreamsClient = 100
defaultMaxConnectionIdle = infinity
defaultMaxConnectionAge = infinity
defaultMaxConnectionAgeGrace = infinity
defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
) )
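These defaults can be overridden from application code. A hedged sketch assuming the grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy server options that accompany this keepalive plumbing:

package main

import (
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/keepalive"
)

func newKeepaliveServer() *grpc.Server {
    return grpc.NewServer(
        grpc.KeepaliveParams(keepalive.ServerParameters{
            MaxConnectionIdle: 15 * time.Minute, // default above is infinity
            Time:              5 * time.Minute,  // default above is 2 hours
            Timeout:           20 * time.Second, // matches the default
        }),
        grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
            MinTime: 5 * time.Minute, // matches defaultKeepalivePolicyMinTime
        }),
    )
}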
// The following defines various control items which could flow through // The following defines various control items which could flow through
@ -73,6 +85,8 @@ type resetStream struct {
func (*resetStream) item() {} func (*resetStream) item() {}
type goAway struct { type goAway struct {
code http2.ErrCode
debugData []byte
} }
func (*goAway) item() {} func (*goAway) item() {}
@ -111,35 +125,9 @@ func newQuotaPool(q int) *quotaPool {
return qb return qb
} }
// add adds n to the available quota and tries to send it on acquire. // add cancels the pending quota sent on acquire, increments it by v and sends
func (qb *quotaPool) add(n int) {
qb.mu.Lock()
defer qb.mu.Unlock()
qb.quota += n
if qb.quota <= 0 {
return
}
select {
case qb.c <- qb.quota:
qb.quota = 0
default:
}
}
// cancel cancels the pending quota sent on acquire, if any.
func (qb *quotaPool) cancel() {
qb.mu.Lock()
defer qb.mu.Unlock()
select {
case n := <-qb.c:
qb.quota += n
default:
}
}
// reset cancels the pending quota sent on acquired, incremented by v and sends
// it back on acquire. // it back on acquire.
func (qb *quotaPool) reset(v int) { func (qb *quotaPool) add(v int) {
qb.mu.Lock() qb.mu.Lock()
defer qb.mu.Unlock() defer qb.mu.Unlock()
select { select {
@ -151,6 +139,10 @@ func (qb *quotaPool) reset(v int) {
if qb.quota <= 0 { if qb.quota <= 0 {
return return
} }
// After the pool has been created, this is the only place that sends on
// the channel. Since mu is held at this point and any quota that was sent
// on the channel has been retrieved, we know that this code will always
// place any positive quota value on the channel.
select { select {
case qb.c <- qb.quota: case qb.c <- qb.quota:
qb.quota = 0 qb.quota = 0

@ -53,6 +53,7 @@ import (
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer" "google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
) )
// NewServerHandlerTransport returns a ServerTransport handling gRPC // NewServerHandlerTransport returns a ServerTransport handling gRPC
@ -182,7 +183,7 @@ func (ht *serverHandlerTransport) do(fn func()) error {
} }
} }
func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
err := ht.do(func() { err := ht.do(func() {
ht.writeCommonHeaders(s) ht.writeCommonHeaders(s)
@ -192,10 +193,13 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code,
ht.rw.(http.Flusher).Flush() ht.rw.(http.Flusher).Flush()
h := ht.rw.Header() h := ht.rw.Header()
h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
if statusDesc != "" { if m := st.Message(); m != "" {
h.Set("Grpc-Message", encodeGrpcMessage(statusDesc)) h.Set("Grpc-Message", encodeGrpcMessage(m))
} }
// TODO: Support Grpc-Status-Details-Bin
if md := s.Trailer(); len(md) > 0 { if md := s.Trailer(); len(md) > 0 {
for k, vv := range md { for k, vv := range md {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent. // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
@ -234,6 +238,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
h.Add("Trailer", "Grpc-Status") h.Add("Trailer", "Grpc-Status")
h.Add("Trailer", "Grpc-Message") h.Add("Trailer", "Grpc-Message")
// TODO: Support Grpc-Status-Details-Bin
if s.sendCompress != "" { if s.sendCompress != "" {
h.Set("Grpc-Encoding", s.sendCompress) h.Set("Grpc-Encoding", s.sendCompress)
@ -268,7 +273,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
}) })
} }
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
// With this transport type there will be exactly 1 stream: this HTTP request. // With this transport type there will be exactly 1 stream: this HTTP request.
var ctx context.Context var ctx context.Context
@ -314,7 +319,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
if req.TLS != nil { if req.TLS != nil {
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
} }
ctx = metadata.NewContext(ctx, ht.headerMD) ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
ctx = peer.NewContext(ctx, pr) ctx = peer.NewContext(ctx, pr)
s.ctx = newContextWithStream(ctx, s) s.ctx = newContextWithStream(ctx, s)
s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
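The WriteStatus signature change above swaps the (codes.Code, string) pair for a *status.Status. A minimal sketch of reading the code and message off a Status value; the status constructed here is an arbitrary example.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.NotFound, "no such object")
	// The handler transport now reads the code and message off the Status
	// value instead of taking them as separate arguments.
	fmt.Printf("Grpc-Status: %d\n", st.Code())
	if m := st.Message(); m != "" {
		fmt.Printf("Grpc-Message: %s\n", m)
	}
}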
View file
@ -35,12 +35,12 @@ package transport
import ( import (
"bytes" "bytes"
"fmt"
"io" "io"
"math" "math"
"net" "net"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"time" "time"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -49,17 +49,24 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer" "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
) )
// http2Client implements the ClientTransport interface with HTTP2. // http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct { type http2Client struct {
target string // server name/addr ctx context.Context
userAgent string target string // server name/addr
conn net.Conn // underlying communication channel userAgent string
authInfo credentials.AuthInfo // auth info about the connection md interface{}
nextID uint32 // the next stream ID to be used conn net.Conn // underlying communication channel
remoteAddr net.Addr
localAddr net.Addr
authInfo credentials.AuthInfo // auth info about the connection
nextID uint32 // the next stream ID to be used
// writableChan synchronizes write access to the transport. // writableChan synchronizes write access to the transport.
// A writer acquires the write lock by sending a value on writableChan // A writer acquires the write lock by sending a value on writableChan
@ -75,6 +82,8 @@ type http2Client struct {
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport. // that the server sent GoAway on this transport.
goAway chan struct{} goAway chan struct{}
// awakenKeepalive is used to wake up keepalive after it has gone dormant.
awakenKeepalive chan struct{}
framer *framer framer *framer
hBuf *bytes.Buffer // the buffer for HPACK encoding hBuf *bytes.Buffer // the buffer for HPACK encoding
@ -94,6 +103,13 @@ type http2Client struct {
creds []credentials.PerRPCCredentials creds []credentials.PerRPCCredentials
// Boolean to keep track of reading activity on transport.
// 1 is true and 0 is false.
activity uint32 // Accessed atomically.
kp keepalive.ClientParameters
statsHandler stats.Handler
mu sync.Mutex // guard the following variables mu sync.Mutex // guard the following variables
state transportState // the state of underlying connection state transportState // the state of underlying connection
activeStreams map[uint32]*Stream activeStreams map[uint32]*Stream
@ -105,9 +121,12 @@ type http2Client struct {
goAwayID uint32 goAwayID uint32
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
prevGoAwayID uint32 prevGoAwayID uint32
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
} }
func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Context, addr string) (net.Conn, error) { func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
if fn != nil { if fn != nil {
return fn(ctx, addr) return fn(ctx, addr)
} }
@ -145,10 +164,13 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction // and starts to receive messages on it. Non-nil error returns if construction
// fails. // fails.
func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) { func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) {
scheme := "http" scheme := "http"
conn, err := dial(opts.Dialer, ctx, addr) conn, err := dial(ctx, opts.Dialer, addr.Addr)
if err != nil { if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err)
}
return nil, connectionErrorf(true, err, "transport: %v", err) return nil, connectionErrorf(true, err, "transport: %v", err)
} }
// Any further errors will close the underlying connection // Any further errors will close the underlying connection
@ -160,7 +182,7 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl
var authInfo credentials.AuthInfo var authInfo credentials.AuthInfo
if creds := opts.TransportCredentials; creds != nil { if creds := opts.TransportCredentials; creds != nil {
scheme = "https" scheme = "https"
conn, authInfo, err = creds.ClientHandshake(ctx, addr, conn) conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
if err != nil { if err != nil {
// Credentials handshake errors are typically considered permanent // Credentials handshake errors are typically considered permanent
// to avoid retrying on e.g. bad certificates. // to avoid retrying on e.g. bad certificates.
@ -168,22 +190,31 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl
return nil, connectionErrorf(temp, err, "transport: %v", err) return nil, connectionErrorf(temp, err, "transport: %v", err)
} }
} }
ua := primaryUA kp := opts.KeepaliveParams
if opts.UserAgent != "" { // Validate keepalive parameters.
ua = opts.UserAgent + " " + ua if kp.Time == 0 {
kp.Time = defaultClientKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultClientKeepaliveTimeout
} }
var buf bytes.Buffer var buf bytes.Buffer
t := &http2Client{ t := &http2Client{
target: addr, ctx: ctx,
userAgent: ua, target: addr.Addr,
conn: conn, userAgent: opts.UserAgent,
authInfo: authInfo, md: addr.Metadata,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: authInfo,
// The client initiated stream id is odd starting from 1. // The client initiated stream id is odd starting from 1.
nextID: 1, nextID: 1,
writableChan: make(chan int, 1), writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}), shutdownChan: make(chan struct{}),
errorChan: make(chan struct{}), errorChan: make(chan struct{}),
goAway: make(chan struct{}), goAway: make(chan struct{}),
awakenKeepalive: make(chan struct{}, 1),
framer: newFramer(conn), framer: newFramer(conn),
hBuf: &buf, hBuf: &buf,
hEnc: hpack.NewEncoder(&buf), hEnc: hpack.NewEncoder(&buf),
@ -194,8 +225,24 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl
state: reachable, state: reachable,
activeStreams: make(map[uint32]*Stream), activeStreams: make(map[uint32]*Stream),
creds: opts.PerRPCCredentials, creds: opts.PerRPCCredentials,
maxStreams: math.MaxInt32, maxStreams: defaultMaxStreamsClient,
streamsQuota: newQuotaPool(defaultMaxStreamsClient),
streamSendQuota: defaultWindowSize, streamSendQuota: defaultWindowSize,
kp: kp,
statsHandler: opts.StatsHandler,
}
// Make sure awakenKeepalive can't be written upon.
// keepalive routine will make it writable, if need be.
t.awakenKeepalive <- struct{}{}
if t.statsHandler != nil {
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{
Client: true,
}
t.statsHandler.HandleConn(t.ctx, connBegin)
} }
// Start the reader goroutine for incoming message. Each transport has // Start the reader goroutine for incoming message. Each transport has
// a dedicated goroutine which reads HTTP2 frame from network. Then it // a dedicated goroutine which reads HTTP2 frame from network. Then it
@ -231,6 +278,9 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl
} }
} }
go t.controller() go t.controller()
if t.kp.Time != infinity {
go t.keepalive()
}
t.writableChan <- 0 t.writableChan <- 0
return t, nil return t, nil
} }
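The new statsHandler field is driven through the stats.Handler interface (TagConn/HandleConn for connection events, TagRPC/HandleRPC per RPC). Below is a minimal logging handler as a sketch, assuming the grpc.WithStatsHandler dial option from the same grpc release; the target address is a placeholder.

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type logHandler struct{}

func (logHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	log.Printf("conn %v -> %v", info.LocalAddr, info.RemoteAddr)
	return ctx
}
func (logHandler) HandleConn(ctx context.Context, s stats.ConnStats) {
	log.Printf("conn event: %T", s)
}
func (logHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return ctx
}
func (logHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	log.Printf("rpc event: %T (client=%v)", s, s.IsClient())
}

func main() {
	// Placeholder address; the handler receives ConnBegin/ConnEnd and the
	// per-RPC header/trailer events emitted by the transport above.
	conn, err := grpc.Dial("localhost:5557", grpc.WithInsecure(),
		grpc.WithStatsHandler(logHandler{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}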
@ -264,16 +314,17 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
return s return s
} }
// NewStream creates a stream and register it into the transport as "active" // NewStream creates a stream and registers it into the transport as "active"
// streams. // streams.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
pr := &peer.Peer{ pr := &peer.Peer{
Addr: t.conn.RemoteAddr(), Addr: t.remoteAddr,
} }
// Attach Auth info if there is any. // Attach Auth info if there is any.
if t.authInfo != nil { if t.authInfo != nil {
pr.AuthInfo = t.authInfo pr.AuthInfo = t.authInfo
} }
userCtx := ctx
ctx = peer.NewContext(ctx, pr) ctx = peer.NewContext(ctx, pr)
authData := make(map[string]string) authData := make(map[string]string)
for _, c := range t.creds { for _, c := range t.creds {
@ -311,21 +362,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
t.mu.Unlock() t.mu.Unlock()
return nil, ErrConnClosing return nil, ErrConnClosing
} }
checkStreamsQuota := t.streamsQuota != nil
t.mu.Unlock() t.mu.Unlock()
if checkStreamsQuota { sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire())
sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) if err != nil {
if err != nil { return nil, err
return nil, err }
} // Returns the quota balance back.
// Returns the quota balance back. if sq > 1 {
if sq > 1 { t.streamsQuota.add(sq - 1)
t.streamsQuota.add(sq - 1)
}
} }
if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
// Return the quota back now because there is no stream returned to the caller. // Return the quota back now because there is no stream returned to the caller.
if _, ok := err.(StreamError); ok && checkStreamsQuota { if _, ok := err.(StreamError); ok {
t.streamsQuota.add(1) t.streamsQuota.add(1)
} }
return nil, err return nil, err
@ -333,9 +381,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
t.mu.Lock() t.mu.Lock()
if t.state == draining { if t.state == draining {
t.mu.Unlock() t.mu.Unlock()
if checkStreamsQuota { t.streamsQuota.add(1)
t.streamsQuota.add(1)
}
// Need to make t writable again so that the rpc in flight can still proceed. // Need to make t writable again so that the rpc in flight can still proceed.
t.writableChan <- 0 t.writableChan <- 0
return nil, ErrStreamDrain return nil, ErrStreamDrain
@ -345,18 +391,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, ErrConnClosing return nil, ErrConnClosing
} }
s := t.newStream(ctx, callHdr) s := t.newStream(ctx, callHdr)
s.clientStatsCtx = userCtx
t.activeStreams[s.id] = s t.activeStreams[s.id] = s
// If the number of active streams changes from 0 to 1, then check if keepalive
// has gone dormant. If so, wake it up.
if len(t.activeStreams) == 1 {
select {
case t.awakenKeepalive <- struct{}{}:
t.framer.writePing(false, false, [8]byte{})
default:
}
}
// This stream is not counted when applySetings(...) initialize t.streamsQuota.
// Reset t.streamsQuota to the right value.
var reset bool
if !checkStreamsQuota && t.streamsQuota != nil {
reset = true
}
t.mu.Unlock() t.mu.Unlock()
if reset {
t.streamsQuota.reset(-1)
}
// HPACK encodes various headers. Note that once WriteField(...) is // HPACK encodes various headers. Note that once WriteField(...) is
// called, the corresponding headers/continuation frame has to be sent // called, the corresponding headers/continuation frame has to be sent
@ -388,7 +435,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
hasMD bool hasMD bool
endHeaders bool endHeaders bool
) )
if md, ok := metadata.FromContext(ctx); ok { if md, ok := metadata.FromOutgoingContext(ctx); ok {
hasMD = true hasMD = true
for k, v := range md { for k, v := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
@ -400,7 +447,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
} }
} }
} }
if md, ok := t.md.(*metadata.MD); ok {
for k, v := range *md {
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
}
first := true first := true
bufLen := t.hBuf.Len()
// Sends the headers in a single batch even when they span multiple frames. // Sends the headers in a single batch even when they span multiple frames.
for !endHeaders { for !endHeaders {
size := t.hBuf.Len() size := t.hBuf.Len()
@ -435,6 +493,17 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, connectionErrorf(true, err, "transport: %v", err) return nil, connectionErrorf(true, err, "transport: %v", err)
} }
} }
if t.statsHandler != nil {
outHeader := &stats.OutHeader{
Client: true,
WireLength: bufLen,
FullMethod: callHdr.Method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: callHdr.SendCompress,
}
t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader)
}
t.writableChan <- 0 t.writableChan <- 0
return s, nil return s, nil
} }
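The switch from metadata.FromContext to metadata.FromOutgoingContext above reflects the incoming/outgoing split of the metadata API in this grpc update. A small sketch of both sides; the key and value are arbitrary.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: metadata destined for the wire lives in the outgoing context.
	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("x-request-id", "42"))
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		fmt.Println(md["x-request-id"]) // [42]
	}
	// Server side: the transport attaches received headers as incoming metadata.
	sctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("x-request-id", "42"))
	if md, ok := metadata.FromIncomingContext(sctx); ok {
		fmt.Println(md["x-request-id"]) // [42]
	}
}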
@ -442,15 +511,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
// CloseStream clears the footprint of a stream when the stream is not needed any more. // CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in reader's goroutine. // This must not be executed in reader's goroutine.
func (t *http2Client) CloseStream(s *Stream, err error) { func (t *http2Client) CloseStream(s *Stream, err error) {
var updateStreams bool
t.mu.Lock() t.mu.Lock()
if t.activeStreams == nil { if t.activeStreams == nil {
t.mu.Unlock() t.mu.Unlock()
return return
} }
if t.streamsQuota != nil {
updateStreams = true
}
delete(t.activeStreams, s.id) delete(t.activeStreams, s.id)
if t.state == draining && len(t.activeStreams) == 0 { if t.state == draining && len(t.activeStreams) == 0 {
// The transport is draining and s is the last live stream on t. // The transport is draining and s is the last live stream on t.
@ -459,10 +524,27 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
return return
} }
t.mu.Unlock() t.mu.Unlock()
if updateStreams { // rstStream is true in case the stream is being closed at the client side
t.streamsQuota.add(1) // and the server needs to be informed about it by sending a RST_STREAM
} // frame.
// To make sure this frame is written to the wire before the headers of the
// next stream waiting for streamsQuota, we add to streamsQuota pool only
// after having acquired the writableChan to send RST_STREAM out (look at
// the controller() routine).
var rstStream bool
var rstError http2.ErrCode
defer func() {
// In case the client doesn't have to send RST_STREAM to the server,
// we can safely add back to the streamsQuota pool now.
if !rstStream {
t.streamsQuota.add(1)
return
}
t.controlBuf.put(&resetStream{s.id, rstError})
}()
s.mu.Lock() s.mu.Lock()
rstStream = s.rstStream
rstError = s.rstError
if q := s.fc.resetPendingData(); q > 0 { if q := s.fc.resetPendingData(); q > 0 {
if n := t.fc.onRead(q); n > 0 { if n := t.fc.onRead(q); n > 0 {
t.controlBuf.put(&windowUpdate{0, n}) t.controlBuf.put(&windowUpdate{0, n})
@ -478,8 +560,9 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
} }
s.state = streamDone s.state = streamDone
s.mu.Unlock() s.mu.Unlock()
if se, ok := err.(StreamError); ok && se.Code != codes.DeadlineExceeded { if _, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) rstStream = true
rstError = http2.ErrCodeCancel
} }
} }
@ -513,6 +596,12 @@ func (t *http2Client) Close() (err error) {
s.mu.Unlock() s.mu.Unlock()
s.write(recvMsg{err: ErrConnClosing}) s.write(recvMsg{err: ErrConnClosing})
} }
if t.statsHandler != nil {
connEnd := &stats.ConnEnd{
Client: true,
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
return return
} }
@ -570,19 +659,14 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
var p []byte var p []byte
if r.Len() > 0 { if r.Len() > 0 {
size := http2MaxFrameLen size := http2MaxFrameLen
s.sendQuotaPool.add(0)
// Wait until the stream has some quota to send the data. // Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil { if err != nil {
return err return err
} }
t.sendQuotaPool.add(0)
// Wait until the transport has some quota to send the data. // Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil { if err != nil {
if _, ok := err.(StreamError); ok || err == io.EOF {
t.sendQuotaPool.cancel()
}
return err return err
} }
if sq < size { if sq < size {
@ -692,7 +776,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
} }
func (t *http2Client) handleData(f *http2.DataFrame) { func (t *http2Client) handleData(f *http2.DataFrame) {
size := len(f.Data()) size := f.Header().Length
if err := t.fc.onData(uint32(size)); err != nil { if err := t.fc.onData(uint32(size)); err != nil {
t.notifyError(connectionErrorf(true, err, "%v", err)) t.notifyError(connectionErrorf(true, err, "%v", err))
return return
@ -706,6 +790,11 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
return return
} }
if size > 0 { if size > 0 {
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
s.mu.Lock() s.mu.Lock()
if s.state == streamDone { if s.state == streamDone {
s.mu.Unlock() s.mu.Unlock()
@ -716,22 +805,27 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
return return
} }
if err := s.fc.onData(uint32(size)); err != nil { if err := s.fc.onData(uint32(size)); err != nil {
s.state = streamDone s.rstStream = true
s.statusCode = codes.Internal s.rstError = http2.ErrCodeFlowControl
s.statusDesc = err.Error() s.finish(status.New(codes.Internal, err.Error()))
close(s.done)
s.mu.Unlock() s.mu.Unlock()
s.write(recvMsg{err: io.EOF}) s.write(recvMsg{err: io.EOF})
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return return
} }
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
}
}
s.mu.Unlock() s.mu.Unlock()
// TODO(bradfitz, zhaoq): A copy is required here because there is no // TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame. // guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated? // Can this copy be eliminated?
data := make([]byte, size) if len(f.Data()) > 0 {
copy(data, f.Data()) data := make([]byte, len(f.Data()))
s.write(recvMsg{data: data}) copy(data, f.Data())
s.write(recvMsg{data: data})
}
} }
// The server has closed the stream without sending trailers. Record that // The server has closed the stream without sending trailers. Record that
// the read direction is closed, and set the status appropriately. // the read direction is closed, and set the status appropriately.
@ -741,10 +835,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
s.mu.Unlock() s.mu.Unlock()
return return
} }
s.state = streamDone s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
s.statusCode = codes.Internal
s.statusDesc = "server closed the stream without sending trailers"
close(s.done)
s.mu.Unlock() s.mu.Unlock()
s.write(recvMsg{err: io.EOF}) s.write(recvMsg{err: io.EOF})
} }
@ -760,18 +851,16 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
s.mu.Unlock() s.mu.Unlock()
return return
} }
s.state = streamDone
if !s.headerDone { if !s.headerDone {
close(s.headerChan) close(s.headerChan)
s.headerDone = true s.headerDone = true
} }
s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
if !ok { if !ok {
grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
s.statusCode = codes.Unknown statusCode = codes.Unknown
} }
s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode) s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode))
close(s.done)
s.mu.Unlock() s.mu.Unlock()
s.write(recvMsg{err: io.EOF}) s.write(recvMsg{err: io.EOF})
} }
@ -790,12 +879,18 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
} }
func (t *http2Client) handlePing(f *http2.PingFrame) { func (t *http2Client) handlePing(f *http2.PingFrame) {
if f.IsAck() { // Do nothing.
return
}
pingAck := &ping{ack: true} pingAck := &ping{ack: true}
copy(pingAck.data[:], f.Data[:]) copy(pingAck.data[:], f.Data[:])
t.controlBuf.put(pingAck) t.controlBuf.put(pingAck)
} }
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
}
t.mu.Lock() t.mu.Lock()
if t.state == reachable || t.state == draining { if t.state == reachable || t.state == draining {
if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { if f.LastStreamID > 0 && f.LastStreamID%2 != 1 {
@ -817,6 +912,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock() t.mu.Unlock()
return return
default: default:
t.setGoAwayReason(f)
} }
t.goAwayID = f.LastStreamID t.goAwayID = f.LastStreamID
close(t.goAway) close(t.goAway)
@ -824,6 +920,26 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock() t.mu.Unlock()
} }
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
// It expects a lock on transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = NoReason
switch f.ErrCode {
case http2.ErrCodeEnhanceYourCalm:
if string(f.DebugData()) == "too_many_pings" {
t.goAwayReason = TooManyPings
}
}
}
func (t *http2Client) GetGoAwayReason() GoAwayReason {
t.mu.Lock()
defer t.mu.Unlock()
return t.goAwayReason
}
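A stand-alone restatement of the GOAWAY-to-reason mapping above, for illustration; the GoAwayReason constants here are local stand-ins for the ones the transport package defines.

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

type GoAwayReason uint8

const (
	NoReason     GoAwayReason = 1
	TooManyPings GoAwayReason = 2
)

// goAwayReason mirrors setGoAwayReason: only an ENHANCE_YOUR_CALM GOAWAY
// carrying the "too_many_pings" debug payload maps to TooManyPings.
func goAwayReason(errCode http2.ErrCode, debugData string) GoAwayReason {
	if errCode == http2.ErrCodeEnhanceYourCalm && debugData == "too_many_pings" {
		return TooManyPings
	}
	return NoReason
}

func main() {
	fmt.Println(goAwayReason(http2.ErrCodeEnhanceYourCalm, "too_many_pings")) // 2
	fmt.Println(goAwayReason(http2.ErrCodeNo, ""))                            // 1
}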
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
id := f.Header().StreamID id := f.Header().StreamID
incr := f.Increment incr := f.Increment
@ -844,21 +960,38 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
} }
var state decodeState var state decodeState
for _, hf := range frame.Fields { for _, hf := range frame.Fields {
state.processHeaderField(hf) if err := state.processHeaderField(hf); err != nil {
} s.mu.Lock()
if state.err != nil { if !s.headerDone {
s.mu.Lock() close(s.headerChan)
if !s.headerDone { s.headerDone = true
close(s.headerChan) }
s.headerDone = true s.mu.Unlock()
s.write(recvMsg{err: err})
// Something wrong. Stops reading even when there is remaining.
return
} }
s.mu.Unlock()
s.write(recvMsg{err: state.err})
// Something wrong. Stops reading even when there is remaining.
return
} }
endStream := frame.StreamEnded() endStream := frame.StreamEnded()
var isHeader bool
defer func() {
if t.statsHandler != nil {
if isHeader {
inHeader := &stats.InHeader{
Client: true,
WireLength: int(frame.Header().Length),
}
t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader)
} else {
inTrailer := &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
}
t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer)
}
}
}()
s.mu.Lock() s.mu.Lock()
if !endStream { if !endStream {
@ -870,6 +1003,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
} }
close(s.headerChan) close(s.headerChan)
s.headerDone = true s.headerDone = true
isHeader = true
} }
if !endStream || s.state == streamDone { if !endStream || s.state == streamDone {
s.mu.Unlock() s.mu.Unlock()
@ -879,10 +1013,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if len(state.mdata) > 0 { if len(state.mdata) > 0 {
s.trailer = state.mdata s.trailer = state.mdata
} }
s.statusCode = state.statusCode s.finish(state.status())
s.statusDesc = state.statusDesc
close(s.done)
s.state = streamDone
s.mu.Unlock() s.mu.Unlock()
s.write(recvMsg{err: io.EOF}) s.write(recvMsg{err: io.EOF})
} }
@ -910,6 +1041,7 @@ func (t *http2Client) reader() {
t.notifyError(err) t.notifyError(err)
return return
} }
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
sf, ok := frame.(*http2.SettingsFrame) sf, ok := frame.(*http2.SettingsFrame)
if !ok { if !ok {
t.notifyError(err) t.notifyError(err)
@ -920,6 +1052,7 @@ func (t *http2Client) reader() {
// loop to keep reading incoming messages on this transport. // loop to keep reading incoming messages on this transport.
for { for {
frame, err := t.framer.readFrame() frame, err := t.framer.readFrame()
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
if err != nil { if err != nil {
// Abort an active stream if the http2.Framer returns a // Abort an active stream if the http2.Framer returns a
// http2.StreamError. This can happen only if the server's response // http2.StreamError. This can happen only if the server's response
@ -971,21 +1104,15 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
s.Val = math.MaxInt32 s.Val = math.MaxInt32
} }
t.mu.Lock() t.mu.Lock()
reset := t.streamsQuota != nil
if !reset {
t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
}
ms := t.maxStreams ms := t.maxStreams
t.maxStreams = int(s.Val) t.maxStreams = int(s.Val)
t.mu.Unlock() t.mu.Unlock()
if reset { t.streamsQuota.add(int(s.Val) - ms)
t.streamsQuota.reset(int(s.Val) - ms)
}
case http2.SettingInitialWindowSize: case http2.SettingInitialWindowSize:
t.mu.Lock() t.mu.Lock()
for _, stream := range t.activeStreams { for _, stream := range t.activeStreams {
// Adjust the sending quota for each stream. // Adjust the sending quota for each stream.
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
} }
t.streamSendQuota = s.Val t.streamSendQuota = s.Val
t.mu.Unlock() t.mu.Unlock()
@ -1013,6 +1140,12 @@ func (t *http2Client) controller() {
t.framer.writeSettings(true, i.ss...) t.framer.writeSettings(true, i.ss...)
} }
case *resetStream: case *resetStream:
// If the server needs to be informed about stream closing,
// then we need to make sure the RST_STREAM frame is written to
// the wire before the headers of the next stream waiting on
// streamsQuota. We ensure this by adding to the streamsQuota pool
// only after having acquired the writableChan to send RST_STREAM.
t.streamsQuota.add(1)
t.framer.writeRSTStream(true, i.streamID, i.code) t.framer.writeRSTStream(true, i.streamID, i.code)
case *flushIO: case *flushIO:
t.framer.flushWrite() t.framer.flushWrite()
@ -1032,6 +1165,61 @@ func (t *http2Client) controller() {
} }
} }
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
p := &ping{data: [8]byte{}}
timer := time.NewTimer(t.kp.Time)
for {
select {
case <-timer.C:
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
timer.Reset(t.kp.Time)
continue
}
// Check if keepalive should go dormant.
t.mu.Lock()
if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
// Make awakenKeepalive writable.
<-t.awakenKeepalive
t.mu.Unlock()
select {
case <-t.awakenKeepalive:
// If control gets here, a ping has been sent and we
// need to reset the timer with keepalive.Timeout.
case <-t.shutdownChan:
return
}
} else {
t.mu.Unlock()
// Send ping.
t.controlBuf.put(p)
}
// By the time control gets here, a ping has been sent one way or the other.
timer.Reset(t.kp.Timeout)
select {
case <-timer.C:
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
timer.Reset(t.kp.Time)
continue
}
t.Close()
return
case <-t.shutdownChan:
if !timer.Stop() {
<-timer.C
}
return
}
case <-t.shutdownChan:
if !timer.Stop() {
<-timer.C
}
return
}
}
}
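The goroutine above is driven entirely by keepalive.ClientParameters. A minimal dial-side sketch, assuming the grpc.WithKeepaliveParams option from the same release; the address and durations are placeholders.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	kp := keepalive.ClientParameters{
		Time:                30 * time.Second, // ping after 30s with no activity
		Timeout:             10 * time.Second, // close if the ping goes unanswered
		PermitWithoutStream: false,            // let keepalive go dormant without streams
	}
	conn, err := grpc.Dial("localhost:5557", grpc.WithInsecure(),
		grpc.WithKeepaliveParams(kp))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}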
func (t *http2Client) Error() <-chan struct{} { func (t *http2Client) Error() <-chan struct{} {
return t.errorChan return t.errorChan
} }
View file
@ -38,18 +38,26 @@ import (
"errors" "errors"
"io" "io"
"math" "math"
"math/rand"
"net" "net"
"strconv" "strconv"
"sync" "sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/net/http2" "golang.org/x/net/http2"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer" "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
) )
// ErrIllegalHeaderWrite indicates that setting header is illegal because of // ErrIllegalHeaderWrite indicates that setting header is illegal because of
@ -58,9 +66,13 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe
// http2Server implements the ServerTransport interface with HTTP2. // http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct { type http2Server struct {
ctx context.Context
conn net.Conn conn net.Conn
remoteAddr net.Addr
localAddr net.Addr
maxStreamID uint32 // max stream ID ever seen maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
// writableChan synchronizes write access to the transport. // writableChan synchronizes write access to the transport.
// A writer acquires the write lock by receiving a value on writableChan // A writer acquires the write lock by receiving a value on writableChan
// and releases it by sending on writableChan. // and releases it by sending on writableChan.
@ -82,21 +94,46 @@ type http2Server struct {
// sendQuotaPool provides flow control to outbound message. // sendQuotaPool provides flow control to outbound message.
sendQuotaPool *quotaPool sendQuotaPool *quotaPool
stats stats.Handler
// Flag to keep track of reading activity on transport.
// 1 is true and 0 is false.
activity uint32 // Accessed atomically.
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
kep keepalive.EnforcementPolicy
// The time instance last ping was received.
lastPingAt time.Time
// Number of times the client has violated keepalive ping policy so far.
pingStrikes uint8
// Flag to signify that number of ping strikes should be reset to 0.
// This is set whenever data or header frames are sent.
// 1 means yes.
resetPingStrikes uint32 // Accessed atomically.
mu sync.Mutex // guard the following mu sync.Mutex // guard the following
state transportState state transportState
activeStreams map[uint32]*Stream activeStreams map[uint32]*Stream
// the per-stream outbound flow control window size set by the peer. // the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32 streamSendQuota uint32
// idle is the time instant when the connection went idle.
// This is either the beginning of the connection or when the number of
// RPCs goes down to 0.
// When the connection is busy, this value is set to 0.
idle time.Time
} }
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong. // returned if something goes wrong.
func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
framer := newFramer(conn) framer := newFramer(conn)
// Send initial settings as connection preface to client. // Send initial settings as connection preface to client.
var settings []http2.Setting var settings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec. // permitted in the HTTP2 spec.
maxStreams := config.MaxStreams
if maxStreams == 0 { if maxStreams == 0 {
maxStreams = math.MaxUint32 maxStreams = math.MaxUint32
} else { } else {
@ -119,14 +156,40 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI
return nil, connectionErrorf(true, err, "transport: %v", err) return nil, connectionErrorf(true, err, "transport: %v", err)
} }
} }
kp := config.KeepaliveParams
if kp.MaxConnectionIdle == 0 {
kp.MaxConnectionIdle = defaultMaxConnectionIdle
}
if kp.MaxConnectionAge == 0 {
kp.MaxConnectionAge = defaultMaxConnectionAge
}
// Add a jitter to MaxConnectionAge.
kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
if kp.MaxConnectionAgeGrace == 0 {
kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
}
if kp.Time == 0 {
kp.Time = defaultServerKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
var buf bytes.Buffer var buf bytes.Buffer
t := &http2Server{ t := &http2Server{
ctx: context.Background(),
conn: conn, conn: conn,
authInfo: authInfo, remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer, framer: framer,
hBuf: &buf, hBuf: &buf,
hEnc: hpack.NewEncoder(&buf), hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams, maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
controlBuf: newRecvBuffer(), controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize}, fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize), sendQuotaPool: newQuotaPool(defaultWindowSize),
@ -135,14 +198,27 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI
shutdownChan: make(chan struct{}), shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream), activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize, streamSendQuota: defaultWindowSize,
stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
}
if t.stats != nil {
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
} }
go t.controller() go t.controller()
go t.keepalive()
t.writableChan <- 0 t.writableChan <- 0
return t, nil return t, nil
} }
// operateHeaders takes action on the decoded headers. // operateHeaders takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) { func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
buf := newRecvBuffer() buf := newRecvBuffer()
s := &Stream{ s := &Stream{
id: frame.Header().StreamID, id: frame.Header().StreamID,
@ -153,13 +229,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
var state decodeState var state decodeState
for _, hf := range frame.Fields { for _, hf := range frame.Fields {
state.processHeaderField(hf) if err := state.processHeaderField(hf); err != nil {
} if se, ok := err.(StreamError); ok {
if err := state.err; err != nil { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
if se, ok := err.(StreamError); ok { }
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) return
} }
return
} }
if frame.StreamEnded() { if frame.StreamEnded() {
@ -168,12 +243,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
} }
s.recvCompress = state.encoding s.recvCompress = state.encoding
if state.timeoutSet { if state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
} else { } else {
s.ctx, s.cancel = context.WithCancel(context.TODO()) s.ctx, s.cancel = context.WithCancel(t.ctx)
} }
pr := &peer.Peer{ pr := &peer.Peer{
Addr: t.conn.RemoteAddr(), Addr: t.remoteAddr,
} }
// Attach Auth info if there is any. // Attach Auth info if there is any.
if t.authInfo != nil { if t.authInfo != nil {
@ -186,7 +261,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.ctx = newContextWithStream(s.ctx, s) s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context. // Attach the received metadata to the context.
if len(state.mdata) > 0 { if len(state.mdata) > 0 {
s.ctx = metadata.NewContext(s.ctx, state.mdata) s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
} }
s.dec = &recvBufferReader{ s.dec = &recvBufferReader{
@ -195,6 +270,18 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
} }
s.recvCompress = state.encoding s.recvCompress = state.encoding
s.method = state.method s.method = state.method
if t.inTapHandle != nil {
var err error
info := &tap.Info{
FullMethodName: state.method,
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
// TODO: Log the real error.
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return
}
}
t.mu.Lock() t.mu.Lock()
if t.state != reachable { if t.state != reachable {
t.mu.Unlock() t.mu.Unlock()
@ -214,17 +301,33 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
t.maxStreamID = s.id t.maxStreamID = s.id
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
t.activeStreams[s.id] = s t.activeStreams[s.id] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
t.mu.Unlock() t.mu.Unlock()
s.windowHandler = func(n int) { s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n)) t.updateWindow(s, uint32(n))
} }
s.ctx = traceCtx(s.ctx, s.method)
if t.stats != nil {
s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
FullMethod: s.method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
}
t.stats.HandleRPC(s.ctx, inHeader)
}
handle(s) handle(s)
return return
} }
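The inTapHandle hook wired into operateHeaders above runs before any stream state is allocated and may reject the RPC outright. A server-side sketch, assuming the grpc.InTapHandle option from the same release; the method name being rejected and the address are arbitrary examples.

package main

import (
	"errors"
	"log"
	"net"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/tap"
)

// reject runs before the stream is created; returning an error makes the
// transport answer with RST_STREAM(REFUSED_STREAM), as in the diff above.
func reject(ctx context.Context, info *tap.Info) (context.Context, error) {
	if info.FullMethodName == "/some.Service/ExpensiveCall" {
		return nil, errors.New("refused before stream creation")
	}
	return ctx, nil
}

func main() {
	srv := grpc.NewServer(grpc.InTapHandle(reject))
	lis, err := net.Listen("tcp", "localhost:5557")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}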
// HandleStreams receives incoming streams using the given handler. This is // HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine. // typically run in a separate goroutine.
func (t *http2Server) HandleStreams(handle func(*Stream)) { // traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
// Check the validity of client preface. // Check the validity of client preface.
preface := make([]byte, len(clientPreface)) preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil { if _, err := io.ReadFull(t.conn, preface); err != nil {
@ -248,6 +351,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
t.Close() t.Close()
return return
} }
atomic.StoreUint32(&t.activity, 1)
sf, ok := frame.(*http2.SettingsFrame) sf, ok := frame.(*http2.SettingsFrame)
if !ok { if !ok {
grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@ -258,6 +362,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
for { for {
frame, err := t.framer.readFrame() frame, err := t.framer.readFrame()
atomic.StoreUint32(&t.activity, 1)
if err != nil { if err != nil {
if se, ok := err.(http2.StreamError); ok { if se, ok := err.(http2.StreamError); ok {
t.mu.Lock() t.mu.Lock()
@ -279,7 +384,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
} }
switch frame := frame.(type) { switch frame := frame.(type) {
case *http2.MetaHeadersFrame: case *http2.MetaHeadersFrame:
if t.operateHeaders(frame, handle) { if t.operateHeaders(frame, handle, traceCtx) {
t.Close() t.Close()
break break
} }
@ -334,7 +439,7 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) {
} }
func (t *http2Server) handleData(f *http2.DataFrame) { func (t *http2Server) handleData(f *http2.DataFrame) {
size := len(f.Data()) size := f.Header().Length
if err := t.fc.onData(uint32(size)); err != nil { if err := t.fc.onData(uint32(size)); err != nil {
grpclog.Printf("transport: http2Server %v", err) grpclog.Printf("transport: http2Server %v", err)
t.Close() t.Close()
@ -349,6 +454,11 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
return return
} }
if size > 0 { if size > 0 {
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
s.mu.Lock() s.mu.Lock()
if s.state == streamDone { if s.state == streamDone {
s.mu.Unlock() s.mu.Unlock()
@ -364,13 +474,20 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return return
} }
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
}
}
s.mu.Unlock() s.mu.Unlock()
// TODO(bradfitz, zhaoq): A copy is required here because there is no // TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame. // guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated? // Can this copy be eliminated?
data := make([]byte, size) if len(f.Data()) > 0 {
copy(data, f.Data()) data := make([]byte, len(f.Data()))
s.write(recvMsg{data: data}) copy(data, f.Data())
s.write(recvMsg{data: data})
}
} }
if f.Header().Flags.Has(http2.FlagDataEndStream) { if f.Header().Flags.Has(http2.FlagDataEndStream) {
// Received the end of stream from the client. // Received the end of stream from the client.
@ -404,10 +521,50 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
t.controlBuf.put(&settings{ack: true, ss: ss}) t.controlBuf.put(&settings{ack: true, ss: ss})
} }
const (
maxPingStrikes = 2
defaultPingTimeout = 2 * time.Hour
)
func (t *http2Server) handlePing(f *http2.PingFrame) { func (t *http2Server) handlePing(f *http2.PingFrame) {
if f.IsAck() { // Do nothing.
return
}
pingAck := &ping{ack: true} pingAck := &ping{ack: true}
copy(pingAck.data[:], f.Data[:]) copy(pingAck.data[:], f.Data[:])
t.controlBuf.put(pingAck) t.controlBuf.put(pingAck)
now := time.Now()
defer func() {
t.lastPingAt = now
}()
// If resetPingStrikes is set, we don't need to check this ping for a
// policy violation, and the pingStrikes counter should be reset
// to 0.
if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
t.pingStrikes = 0
return
}
t.mu.Lock()
ns := len(t.activeStreams)
t.mu.Unlock()
if ns < 1 && !t.kep.PermitWithoutStream {
// Keepalive shouldn't be active; thus, this new ping should
// have come after at least defaultPingTimeout.
if t.lastPingAt.Add(defaultPingTimeout).After(now) {
t.pingStrikes++
}
} else {
// Check if keepalive policy is respected.
if t.lastPingAt.Add(t.kep.MinTime).After(now) {
t.pingStrikes++
}
}
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")})
}
} }
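The ping-strike accounting above enforces keepalive.EnforcementPolicy. A minimal server-side sketch of setting that policy, assuming the grpc.KeepaliveEnforcementPolicy option from the same release; the address and durations are placeholders.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	kep := keepalive.EnforcementPolicy{
		MinTime:             5 * time.Minute, // pings arriving faster than this earn a strike
		PermitWithoutStream: false,           // pings without an active stream are policed too
	}
	srv := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kep))
	lis, err := net.Listen("tcp", "localhost:5557")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}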
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@ -426,6 +583,13 @@ func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) e
first := true first := true
endHeaders := false endHeaders := false
var err error var err error
defer func() {
if err == nil {
// Reset ping strikes when sending headers since that might cause the
// peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
}()
// Sends the headers in a single batch. // Sends the headers in a single batch.
for !endHeaders { for !endHeaders {
size := t.hBuf.Len() size := t.hBuf.Len()
@ -462,6 +626,14 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
return ErrIllegalHeaderWrite return ErrIllegalHeaderWrite
} }
s.headerOk = true s.headerOk = true
if md.Len() > 0 {
if s.header.Len() > 0 {
s.header = metadata.Join(s.header, md)
} else {
s.header = md
}
}
md = s.header
s.mu.Unlock() s.mu.Unlock()
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
return err return err
@ -481,9 +653,16 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
} }
} }
bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, false); err != nil { if err := t.writeHeaders(s, t.hBuf, false); err != nil {
return err return err
} }
if t.stats != nil {
outHeader := &stats.OutHeader{
WireLength: bufLen,
}
t.stats.HandleRPC(s.Context(), outHeader)
}
t.writableChan <- 0 t.writableChan <- 0
return nil return nil
} }
@ -492,8 +671,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
// There is no further I/O operations being able to perform on this stream. // There is no further I/O operations being able to perform on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted. // OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
var headersSent bool var headersSent, hasHeader bool
s.mu.Lock() s.mu.Lock()
if s.state == streamDone { if s.state == streamDone {
s.mu.Unlock() s.mu.Unlock()
@ -502,7 +681,16 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
if s.headerOk { if s.headerOk {
headersSent = true headersSent = true
} }
if s.header.Len() > 0 {
hasHeader = true
}
s.mu.Unlock() s.mu.Unlock()
if !headersSent && hasHeader {
t.WriteHeader(s, nil)
headersSent = true
}
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
return err return err
} }
@ -514,9 +702,24 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
t.hEnc.WriteField( t.hEnc.WriteField(
hpack.HeaderField{ hpack.HeaderField{
Name: "grpc-status", Name: "grpc-status",
Value: strconv.Itoa(int(statusCode)), Value: strconv.Itoa(int(st.Code())),
}) })
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)}) t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
panic(err)
}
for k, v := range metadata.New(map[string]string{"grpc-status-details-bin": (string)(stBytes)}) {
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
}
// Attach the trailer metadata. // Attach the trailer metadata.
for k, v := range s.trailer { for k, v := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent. // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
@ -527,10 +730,17 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
} }
} }
bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, true); err != nil { if err := t.writeHeaders(s, t.hBuf, true); err != nil {
t.Close() t.Close()
return err return err
} }
if t.stats != nil {
outTrailer := &stats.OutTrailer{
WireLength: bufLen,
}
t.stats.HandleRPC(s.Context(), outTrailer)
}
t.closeStream(s) t.closeStream(s)
t.writableChan <- 0 t.writableChan <- 0
return nil return nil
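The grpc-status-details-bin trailer added above carries nothing more than the proto-marshalled status. A sketch of producing that wire value; the status constructed here is an arbitrary example.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.FailedPrecondition, "object is locked")
	p := st.Proto() // code, message and any attached details
	stBytes, err := proto.Marshal(p)
	if err != nil {
		panic(err)
	}
	// WriteStatus above sends exactly these bytes as the
	// grpc-status-details-bin trailer value.
	fmt.Printf("%d bytes of status proto\n", len(stBytes))
}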
@ -538,7 +748,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error // Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returned if it fails (e.g., framing error, transport error). // is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) {
// TODO(zhaoq): Support multi-writers for a single stream. // TODO(zhaoq): Support multi-writers for a single stream.
var writeHeaderFrame bool var writeHeaderFrame bool
s.mu.Lock() s.mu.Lock()
@ -548,49 +758,32 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
} }
if !s.headerOk { if !s.headerOk {
writeHeaderFrame = true writeHeaderFrame = true
s.headerOk = true
} }
s.mu.Unlock() s.mu.Unlock()
if writeHeaderFrame { if writeHeaderFrame {
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { t.WriteHeader(s, nil)
return err
}
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
if s.sendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
p := http2.HeadersFrameParam{
StreamID: s.id,
BlockFragment: t.hBuf.Bytes(),
EndHeaders: true,
}
if err := t.framer.writeHeaders(false, p); err != nil {
t.Close()
return connectionErrorf(true, err, "transport: %v", err)
}
t.writableChan <- 0
} }
defer func() {
if err == nil {
// Reset ping strikes when sending data since this might cause
// the peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
}()
r := bytes.NewBuffer(data) r := bytes.NewBuffer(data)
for { for {
if r.Len() == 0 { if r.Len() == 0 {
return nil return nil
} }
size := http2MaxFrameLen size := http2MaxFrameLen
s.sendQuotaPool.add(0)
// Wait until the stream has some quota to send the data. // Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil { if err != nil {
return err return err
} }
t.sendQuotaPool.add(0)
// Wait until the transport has some quota to send the data. // Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil { if err != nil {
if _, ok := err.(StreamError); ok {
t.sendQuotaPool.cancel()
}
return err return err
} }
if sq < size { if sq < size {
@ -658,7 +851,7 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
t.mu.Lock() t.mu.Lock()
defer t.mu.Unlock() defer t.mu.Unlock()
for _, stream := range t.activeStreams { for _, stream := range t.activeStreams {
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
} }
t.streamSendQuota = s.Val t.streamSendQuota = s.Val
} }
@ -666,6 +859,91 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
} }
} }
// keepalive running in a separate goroutine does the following:
// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
p := &ping{}
var pingSent bool
maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
maxAge := time.NewTimer(t.kp.MaxConnectionAge)
keepalive := time.NewTimer(t.kp.Time)
// NOTE: All exit paths of this function should reset their
// respective timers. A failure to do so will cause the
// following clean-up to deadlock and eventually leak.
defer func() {
if !maxIdle.Stop() {
<-maxIdle.C
}
if !maxAge.Stop() {
<-maxAge.C
}
if !keepalive.Stop() {
<-keepalive.C
}
}()
for {
select {
case <-maxIdle.C:
t.mu.Lock()
idle := t.idle
if idle.IsZero() { // The connection is non-idle.
t.mu.Unlock()
maxIdle.Reset(t.kp.MaxConnectionIdle)
continue
}
val := t.kp.MaxConnectionIdle - time.Since(idle)
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
t.state = draining
t.mu.Unlock()
t.Drain()
// Resetting the timer so that the clean-up doesn't deadlock.
maxIdle.Reset(infinity)
return
}
t.mu.Unlock()
maxIdle.Reset(val)
case <-maxAge.C:
t.mu.Lock()
t.state = draining
t.mu.Unlock()
t.Drain()
maxAge.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-maxAge.C:
// Close the connection after grace period.
t.Close()
// Resetting the timer so that the clean-up doesn't deadlock.
maxAge.Reset(infinity)
case <-t.shutdownChan:
}
return
case <-keepalive.C:
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
pingSent = false
keepalive.Reset(t.kp.Time)
continue
}
if pingSent {
t.Close()
// Resetting the timer so that the clean-up doesn't deadlock.
keepalive.Reset(infinity)
return
}
pingSent = true
t.controlBuf.put(p)
keepalive.Reset(t.kp.Timeout)
case <-t.shutdownChan:
return
}
}
}
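All four behaviors of the goroutine above are configured through keepalive.ServerParameters. A minimal sketch, assuming the grpc.KeepaliveParams server option from the same release; every duration and the address are arbitrary examples.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	kp := keepalive.ServerParameters{
		MaxConnectionIdle:     15 * time.Minute, // case 1: drain idle connections
		MaxConnectionAge:      30 * time.Minute, // case 2: drain old connections (jitter is added internally)
		MaxConnectionAgeGrace: 5 * time.Minute,  // case 3: then force-close
		Time:                  2 * time.Hour,    // case 4: ping period
		Timeout:               20 * time.Second, // case 4: ping timeout
	}
	srv := grpc.NewServer(grpc.KeepaliveParams(kp))
	lis, err := net.Listen("tcp", "localhost:5557")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}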
// controller running in a separate goroutine takes charge of sending control // controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the server. // frames (e.g., window update, reset stream, setting, etc.) to the server.
func (t *http2Server) controller() { func (t *http2Server) controller() {
@ -697,7 +975,10 @@ func (t *http2Server) controller() {
sid := t.maxStreamID sid := t.maxStreamID
t.state = draining t.state = draining
t.mu.Unlock() t.mu.Unlock()
t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil) t.framer.writeGoAway(true, sid, i.code, i.debugData)
if i.code == http2.ErrCodeEnhanceYourCalm {
t.Close()
}
case *flushIO: case *flushIO:
t.framer.flushWrite() t.framer.flushWrite()
case *ping: case *ping:
@ -735,6 +1016,10 @@ func (t *http2Server) Close() (err error) {
for _, s := range streams { for _, s := range streams {
s.cancel() s.cancel()
} }
if t.stats != nil {
connEnd := &stats.ConnEnd{}
t.stats.HandleConn(t.ctx, connEnd)
}
return return
} }
@ -743,6 +1028,9 @@ func (t *http2Server) Close() (err error) {
func (t *http2Server) closeStream(s *Stream) { func (t *http2Server) closeStream(s *Stream) {
t.mu.Lock() t.mu.Lock()
delete(t.activeStreams, s.id) delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
if t.state == draining && len(t.activeStreams) == 0 { if t.state == draining && len(t.activeStreams) == 0 {
defer t.Close() defer t.Close()
} }
@ -766,9 +1054,21 @@ func (t *http2Server) closeStream(s *Stream) {
} }
func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) RemoteAddr() net.Addr {
return t.conn.RemoteAddr() return t.remoteAddr
} }
func (t *http2Server) Drain() { func (t *http2Server) Drain() {
t.controlBuf.put(&goAway{}) t.controlBuf.put(&goAway{code: http2.ErrCodeNo})
}
var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))
func getJitter(v time.Duration) time.Duration {
if v == infinity {
return 0
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
j := rgen.Int63n(2*r) - r
return time.Duration(j)
} }
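A quick, self-contained restatement of getJitter demonstrating the +/-10% bound; the infinity check is dropped here for brevity and the input duration is an arbitrary example.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func getJitter(v time.Duration) time.Duration {
	r := int64(v / 10)
	j := rand.Int63n(2*r) - r // uniform in [-v/10, v/10)
	return time.Duration(j)
}

func main() {
	v := 30 * time.Minute
	for i := 0; i < 3; i++ {
		fmt.Println(v + getJitter(v)) // always within 10% of 30m
	}
}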
Some files were not shown because too many files have changed in this diff.