Fix dependencies
vendor/golang.org/x/crypto/AUTHORS (new file, 3 lines, generated, vendored)
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.
vendor/golang.org/x/crypto/CONTRIBUTORS (new file, 3 lines, generated, vendored)
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.
vendor/golang.org/x/crypto/LICENSE (new file, 27 lines, generated, vendored)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/golang.org/x/crypto/PATENTS (new file, 22 lines, generated, vendored)
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
vendor/golang.org/x/crypto/ed25519/ed25519.go (new file, 222 lines, generated, vendored)
@@ -0,0 +1,222 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// In Go 1.13, the ed25519 package was promoted to the standard library as
// crypto/ed25519, and this package became a wrapper for the standard library one.
//
// +build !go1.13

// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
package ed25519

// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.

import (
	"bytes"
	"crypto"
	cryptorand "crypto/rand"
	"crypto/sha512"
	"errors"
	"io"
	"strconv"

	"golang.org/x/crypto/ed25519/internal/edwards25519"
)

const (
	// PublicKeySize is the size, in bytes, of public keys as used in this package.
	PublicKeySize = 32
	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
	PrivateKeySize = 64
	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
	SignatureSize = 64
	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
	SeedSize = 32
)

// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte

// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
type PrivateKey []byte

// Public returns the PublicKey corresponding to priv.
func (priv PrivateKey) Public() crypto.PublicKey {
	publicKey := make([]byte, PublicKeySize)
	copy(publicKey, priv[32:])
	return PublicKey(publicKey)
}

// Seed returns the private key seed corresponding to priv. It is provided for
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
// in this package.
func (priv PrivateKey) Seed() []byte {
	seed := make([]byte, SeedSize)
	copy(seed, priv[:32])
	return seed
}

// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
// indicate the message hasn't been hashed. This can be achieved by passing
// crypto.Hash(0) as the value for opts.
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
	if opts.HashFunc() != crypto.Hash(0) {
		return nil, errors.New("ed25519: cannot sign hashed message")
	}

	return Sign(priv, message), nil
}

// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
	if rand == nil {
		rand = cryptorand.Reader
	}

	seed := make([]byte, SeedSize)
	if _, err := io.ReadFull(rand, seed); err != nil {
		return nil, nil, err
	}

	privateKey := NewKeyFromSeed(seed)
	publicKey := make([]byte, PublicKeySize)
	copy(publicKey, privateKey[32:])

	return publicKey, privateKey, nil
}

// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
	if l := len(seed); l != SeedSize {
		panic("ed25519: bad seed length: " + strconv.Itoa(l))
	}

	digest := sha512.Sum512(seed)
	digest[0] &= 248
	digest[31] &= 127
	digest[31] |= 64

	var A edwards25519.ExtendedGroupElement
	var hBytes [32]byte
	copy(hBytes[:], digest[:])
	edwards25519.GeScalarMultBase(&A, &hBytes)
	var publicKeyBytes [32]byte
	A.ToBytes(&publicKeyBytes)

	privateKey := make([]byte, PrivateKeySize)
	copy(privateKey, seed)
	copy(privateKey[32:], publicKeyBytes[:])

	return privateKey
}

// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
	if l := len(privateKey); l != PrivateKeySize {
		panic("ed25519: bad private key length: " + strconv.Itoa(l))
	}

	h := sha512.New()
	h.Write(privateKey[:32])

	var digest1, messageDigest, hramDigest [64]byte
	var expandedSecretKey [32]byte
	h.Sum(digest1[:0])
	copy(expandedSecretKey[:], digest1[:])
	expandedSecretKey[0] &= 248
	expandedSecretKey[31] &= 63
	expandedSecretKey[31] |= 64

	h.Reset()
	h.Write(digest1[32:])
	h.Write(message)
	h.Sum(messageDigest[:0])

	var messageDigestReduced [32]byte
	edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
	var R edwards25519.ExtendedGroupElement
	edwards25519.GeScalarMultBase(&R, &messageDigestReduced)

	var encodedR [32]byte
	R.ToBytes(&encodedR)

	h.Reset()
	h.Write(encodedR[:])
	h.Write(privateKey[32:])
	h.Write(message)
	h.Sum(hramDigest[:0])
	var hramDigestReduced [32]byte
	edwards25519.ScReduce(&hramDigestReduced, &hramDigest)

	var s [32]byte
	edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)

	signature := make([]byte, SignatureSize)
	copy(signature[:], encodedR[:])
	copy(signature[32:], s[:])

	return signature
}

// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
	if l := len(publicKey); l != PublicKeySize {
		panic("ed25519: bad public key length: " + strconv.Itoa(l))
	}

	if len(sig) != SignatureSize || sig[63]&224 != 0 {
		return false
	}

	var A edwards25519.ExtendedGroupElement
	var publicKeyBytes [32]byte
	copy(publicKeyBytes[:], publicKey)
	if !A.FromBytes(&publicKeyBytes) {
		return false
	}
	edwards25519.FeNeg(&A.X, &A.X)
	edwards25519.FeNeg(&A.T, &A.T)

	h := sha512.New()
	h.Write(sig[:32])
	h.Write(publicKey[:])
	h.Write(message)
	var digest [64]byte
	h.Sum(digest[:0])

	var hReduced [32]byte
	edwards25519.ScReduce(&hReduced, &digest)

	var R edwards25519.ProjectiveGroupElement
	var s [32]byte
	copy(s[:], sig[32:])

	// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
	// the range [0, order) in order to prevent signature malleability.
	if !edwards25519.ScMinimal(&s) {
		return false
	}

	edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)

	var checkR [32]byte
	R.ToBytes(&checkR)
	return bytes.Equal(sig[:32], checkR[:])
}
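Not part of the diff: a minimal usage sketch of the vendored package above, assuming it is imported as golang.org/x/crypto/ed25519.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Passing rand.Reader is explicit; a nil reader would fall back to
	// crypto/rand.Reader, as documented above.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)             // 64-byte signature
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}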
vendor/golang.org/x/crypto/ed25519/ed25519_go113.go (new file, 73 lines, generated, vendored)
@@ -0,0 +1,73 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.13

// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519

import (
	"crypto/ed25519"
	"io"
)

const (
	// PublicKeySize is the size, in bytes, of public keys as used in this package.
	PublicKeySize = 32
	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
	PrivateKeySize = 64
	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
	SignatureSize = 64
	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
	SeedSize = 32
)

// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey

// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey

// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
	return ed25519.GenerateKey(rand)
}

// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
	return ed25519.NewKeyFromSeed(seed)
}

// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
	return ed25519.Sign(privateKey, message)
}

// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
	return ed25519.Verify(publicKey, message, sig)
}
vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go (new file, 1422 lines, generated, vendored)
File diff suppressed because it is too large
vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go (new file, 1793 lines, generated, vendored)
File diff suppressed because it is too large
vendor/golang.org/x/crypto/ssh/terminal/terminal.go (new file, 76 lines, generated, vendored)
@@ -0,0 +1,76 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
//
// Deprecated: this package moved to golang.org/x/term.
package terminal

import (
	"io"

	"golang.org/x/term"
)

// EscapeCodes contains escape sequences that can be written to the terminal in
// order to achieve different styles of text.
type EscapeCodes = term.EscapeCodes

// Terminal contains the state for running a VT100 terminal that is capable of
// reading lines of input.
type Terminal = term.Terminal

// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
// a local terminal, that terminal must first have been put into raw mode.
// prompt is a string that is written at the start of each input line (i.e.
// "> ").
func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
	return term.NewTerminal(c, prompt)
}

// ErrPasteIndicator may be returned from ReadLine as the error, in addition
// to valid line data. It indicates that bracketed paste mode is enabled and
// that the returned line consists only of pasted data. Programs may wish to
// interpret pasted data more literally than typed data.
var ErrPasteIndicator = term.ErrPasteIndicator

// State contains the state of a terminal.
type State = term.State

// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
	return term.IsTerminal(fd)
}

// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
	return term.ReadPassword(fd)
}

// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
	return term.MakeRaw(fd)
}

// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, oldState *State) error {
	return term.Restore(fd, oldState)
}

// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
	return term.GetState(fd)
}

// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
	return term.GetSize(fd)
}
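Not part of the diff: a small sketch of the wrapper API above (IsTerminal, ReadPassword); reading from os.Stdin is an illustrative choice.

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := int(os.Stdin.Fd())
	if !terminal.IsTerminal(fd) {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}

	fmt.Print("Password: ")
	// ReadPassword disables local echo while the secret is typed.
	pw, err := terminal.ReadPassword(fd)
	fmt.Println()
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(pw))
}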
vendor/golang.org/x/net/AUTHORS (new file, 3 lines, generated, vendored)
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
vendor/golang.org/x/net/CONTRIBUTORS (new file, 3 lines, generated, vendored)
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
vendor/golang.org/x/net/LICENSE (new file, 27 lines, generated, vendored)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/golang.org/x/net/PATENTS (new file, 22 lines, generated, vendored)
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
vendor/golang.org/x/net/bpf/asm.go (new file, 41 lines, generated, vendored)
@@ -0,0 +1,41 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

import "fmt"

// Assemble converts insts into raw instructions suitable for loading
// into a BPF virtual machine.
//
// Currently, no optimization is attempted, the assembled program flow
// is exactly as provided.
func Assemble(insts []Instruction) ([]RawInstruction, error) {
	ret := make([]RawInstruction, len(insts))
	var err error
	for i, inst := range insts {
		ret[i], err = inst.Assemble()
		if err != nil {
			return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
		}
	}
	return ret, nil
}

// Disassemble attempts to parse raw back into
// Instructions. Unrecognized RawInstructions are assumed to be an
// extension not implemented by this package, and are passed through
// unchanged to the output. The allDecoded value reports whether insts
// contains no RawInstructions.
func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
	insts = make([]Instruction, len(raw))
	allDecoded = true
	for i, r := range raw {
		insts[i] = r.Disassemble()
		if _, ok := insts[i].(RawInstruction); ok {
			allDecoded = false
		}
	}
	return insts, allDecoded
}
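Not part of the diff: a short sketch of the Assemble/Disassemble round trip described above, reusing the ARP filter from the package documentation later in this commit.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	prog := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},
		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
		bpf.RetConstant{Val: 4096},
		bpf.RetConstant{Val: 0},
	}

	// Assemble to the raw form a kernel (or a Setter) expects.
	raw, err := bpf.Assemble(prog)
	if err != nil {
		panic(err)
	}

	// Disassemble reverses the mapping; allDecoded reports whether every
	// raw instruction was recognized.
	insts, allDecoded := bpf.Disassemble(raw)
	fmt.Println(len(insts), allDecoded)
}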
vendor/golang.org/x/net/bpf/constants.go (new file, 222 lines, generated, vendored)
@@ -0,0 +1,222 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

// A Register is a register of the BPF virtual machine.
type Register uint16

const (
	// RegA is the accumulator register. RegA is always the
	// destination register of ALU operations.
	RegA Register = iota
	// RegX is the indirection register, used by LoadIndirect
	// operations.
	RegX
)

// An ALUOp is an arithmetic or logic operation.
type ALUOp uint16

// ALU binary operation types.
const (
	ALUOpAdd ALUOp = iota << 4
	ALUOpSub
	ALUOpMul
	ALUOpDiv
	ALUOpOr
	ALUOpAnd
	ALUOpShiftLeft
	ALUOpShiftRight
	aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
	ALUOpMod
	ALUOpXor
)

// A JumpTest is a comparison operator used in conditional jumps.
type JumpTest uint16

// Supported operators for conditional jumps.
// K can be RegX for JumpIfX
const (
	// K == A
	JumpEqual JumpTest = iota
	// K != A
	JumpNotEqual
	// K > A
	JumpGreaterThan
	// K < A
	JumpLessThan
	// K >= A
	JumpGreaterOrEqual
	// K <= A
	JumpLessOrEqual
	// K & A != 0
	JumpBitsSet
	// K & A == 0
	JumpBitsNotSet
)

// An Extension is a function call provided by the kernel that
// performs advanced operations that are expensive or impossible
// within the BPF virtual machine.
//
// Extensions are only implemented by the Linux kernel.
//
// TODO: should we prune this list? Some of these extensions seem
// either broken or near-impossible to use correctly, whereas other
// (len, random, ifindex) are quite useful.
type Extension int

// Extension functions available in the Linux kernel.
const (
	// extOffset is the negative maximum number of instructions used
	// to load instructions by overloading the K argument.
	extOffset = -0x1000
	// ExtLen returns the length of the packet.
	ExtLen Extension = 1
	// ExtProto returns the packet's L3 protocol type.
	ExtProto Extension = 0
	// ExtType returns the packet's type (skb->pkt_type in the kernel)
	//
	// TODO: better documentation. How nice an API do we want to
	// provide for these esoteric extensions?
	ExtType Extension = 4
	// ExtPayloadOffset returns the offset of the packet payload, or
	// the first protocol header that the kernel does not know how to
	// parse.
	ExtPayloadOffset Extension = 52
	// ExtInterfaceIndex returns the index of the interface on which
	// the packet was received.
	ExtInterfaceIndex Extension = 8
	// ExtNetlinkAttr returns the netlink attribute of type X at
	// offset A.
	ExtNetlinkAttr Extension = 12
	// ExtNetlinkAttrNested returns the nested netlink attribute of
	// type X at offset A.
	ExtNetlinkAttrNested Extension = 16
	// ExtMark returns the packet's mark value.
	ExtMark Extension = 20
	// ExtQueue returns the packet's assigned hardware queue.
	ExtQueue Extension = 24
	// ExtLinkLayerType returns the packet's hardware address type
	// (e.g. Ethernet, Infiniband).
	ExtLinkLayerType Extension = 28
	// ExtRXHash returns the packets receive hash.
	//
	// TODO: figure out what this rxhash actually is.
	ExtRXHash Extension = 32
	// ExtCPUID returns the ID of the CPU processing the current
	// packet.
	ExtCPUID Extension = 36
	// ExtVLANTag returns the packet's VLAN tag.
	ExtVLANTag Extension = 44
	// ExtVLANTagPresent returns non-zero if the packet has a VLAN
	// tag.
	//
	// TODO: I think this might be a lie: it reads bit 0x1000 of the
	// VLAN header, which changed meaning in recent revisions of the
	// spec - this extension may now return meaningless information.
	ExtVLANTagPresent Extension = 48
	// ExtVLANProto returns 0x8100 if the frame has a VLAN header,
	// 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
	// other value if no VLAN information is present.
	ExtVLANProto Extension = 60
	// ExtRand returns a uniformly random uint32.
	ExtRand Extension = 56
)

// The following gives names to various bit patterns used in opcode construction.

const (
	opMaskCls uint16 = 0x7
	// opClsLoad masks
	opMaskLoadDest  = 0x01
	opMaskLoadWidth = 0x18
	opMaskLoadMode  = 0xe0
	// opClsALU & opClsJump
	opMaskOperand  = 0x08
	opMaskOperator = 0xf0
)

const (
	// +---------------+-----------------+---+---+---+
	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 0 |
	// +---------------+-----------------+---+---+---+
	opClsLoadA uint16 = iota
	// +---------------+-----------------+---+---+---+
	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 1 |
	// +---------------+-----------------+---+---+---+
	opClsLoadX
	// +---+---+---+---+---+---+---+---+
	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
	// +---+---+---+---+---+---+---+---+
	opClsStoreA
	// +---+---+---+---+---+---+---+---+
	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
	// +---+---+---+---+---+---+---+---+
	opClsStoreX
	// +---------------+-----------------+---+---+---+
	// | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
	// +---------------+-----------------+---+---+---+
	opClsALU
	// +-----------------------------+---+---+---+---+
	// |      TestOperator (4b)      | 0 | 1 | 0 | 1 |
	// +-----------------------------+---+---+---+---+
	opClsJump
	// +---+-------------------------+---+---+---+---+
	// | 0 | 0 | 0 |   RetSrc (1b)   | 0 | 1 | 1 | 0 |
	// +---+-------------------------+---+---+---+---+
	opClsReturn
	// +---+-------------------------+---+---+---+---+
	// | 0 | 0 | 0 |  TXAorTAX (1b)  | 0 | 1 | 1 | 1 |
	// +---+-------------------------+---+---+---+---+
	opClsMisc
)

const (
	opAddrModeImmediate uint16 = iota << 5
	opAddrModeAbsolute
	opAddrModeIndirect
	opAddrModeScratch
	opAddrModePacketLen // actually an extension, not an addressing mode.
	opAddrModeMemShift
)

const (
	opLoadWidth4 uint16 = iota << 3
	opLoadWidth2
	opLoadWidth1
)

// Operand for ALU and Jump instructions
type opOperand uint16

// Supported operand sources.
const (
	opOperandConstant opOperand = iota << 3
	opOperandX
)

// An jumpOp is a conditional jump condition.
type jumpOp uint16

// Supported jump conditions.
const (
	opJumpAlways jumpOp = iota << 4
	opJumpEqual
	opJumpGT
	opJumpGE
	opJumpSet
)

const (
	opRetSrcConstant uint16 = iota << 4
	opRetSrcA
)

const (
	opMiscTAX = 0x00
	opMiscTXA = 0x80
)
vendor/golang.org/x/net/bpf/doc.go (new file, 82 lines, generated, vendored)
@@ -0,0 +1,82 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*

Package bpf implements marshaling and unmarshaling of programs for the
Berkeley Packet Filter virtual machine, and provides a Go implementation
of the virtual machine.

BPF's main use is to specify a packet filter for network taps, so that
the kernel doesn't have to expensively copy every packet it sees to
userspace. However, it's been repurposed to other areas where running
user code in-kernel is needed. For example, Linux's seccomp uses BPF
to apply security policies to system calls. For simplicity, this
documentation refers only to packets, but other uses of BPF have their
own data payloads.

BPF programs run in a restricted virtual machine. It has almost no
access to kernel functions, and while conditional branches are
allowed, they can only jump forwards, to guarantee that there are no
infinite loops.

The virtual machine

The BPF VM is an accumulator machine. Its main register, called
register A, is an implicit source and destination in all arithmetic
and logic operations. The machine also has 16 scratch registers for
temporary storage, and an indirection register (register X) for
indirect memory access. All registers are 32 bits wide.

Each run of a BPF program is given one packet, which is placed in the
VM's read-only "main memory". LoadAbsolute and LoadIndirect
instructions can fetch up to 32 bits at a time into register A for
examination.

The goal of a BPF program is to produce and return a verdict (uint32),
which tells the kernel what to do with the packet. In the context of
packet filtering, the returned value is the number of bytes of the
packet to forward to userspace, or 0 to ignore the packet. Other
contexts like seccomp define their own return values.

In order to simplify programs, attempts to read past the end of the
packet terminate the program execution with a verdict of 0 (ignore
packet). This means that the vast majority of BPF programs don't need
to do any explicit bounds checking.

In addition to the bytes of the packet, some BPF programs have access
to extensions, which are essentially calls to kernel utility
functions. Currently, the only extensions supported by this package
are the Linux packet filter extensions.

Examples

This packet filter selects all ARP packets.

	bpf.Assemble([]bpf.Instruction{
		// Load "EtherType" field from the ethernet header.
		bpf.LoadAbsolute{Off: 12, Size: 2},
		// Skip over the next instruction if EtherType is not ARP.
		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
		// Verdict is "send up to 4k of the packet to userspace."
		bpf.RetConstant{Val: 4096},
		// Verdict is "ignore packet."
		bpf.RetConstant{Val: 0},
	})

This packet filter captures a random 1% sample of traffic.

	bpf.Assemble([]bpf.Instruction{
		// Get a 32-bit random number from the Linux kernel.
		bpf.LoadExtension{Num: bpf.ExtRand},
		// 1% dice roll?
		bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1},
		// Capture.
		bpf.RetConstant{Val: 4096},
		// Ignore.
		bpf.RetConstant{Val: 0},
	})

*/
package bpf // import "golang.org/x/net/bpf"
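Not part of the diff: a sketch of running the ARP example above in the package's userspace VM (bpf.NewVM / VM.Run, defined in vm.go later in this commit); the test frame is made up for illustration.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},
		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
		bpf.RetConstant{Val: 4096},
		bpf.RetConstant{Val: 0},
	})
	if err != nil {
		panic(err)
	}

	// A 64-byte frame whose EtherType bytes (offsets 12-13) spell 0x0806
	// (ARP); the rest of the frame is irrelevant to this filter.
	pkt := make([]byte, 64)
	pkt[12], pkt[13] = 0x08, 0x06

	n, err := vm.Run(pkt)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 4096: "send up to 4k of the packet to userspace"
}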
vendor/golang.org/x/net/bpf/instructions.go (new file, 726 lines, generated, vendored)
@@ -0,0 +1,726 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
import "fmt"
|
||||
|
||||
// An Instruction is one instruction executed by the BPF virtual
|
||||
// machine.
|
||||
type Instruction interface {
|
||||
// Assemble assembles the Instruction into a RawInstruction.
|
||||
Assemble() (RawInstruction, error)
|
||||
}
|
||||
|
||||
// A RawInstruction is a raw BPF virtual machine instruction.
|
||||
type RawInstruction struct {
|
||||
// Operation to execute.
|
||||
Op uint16
|
||||
// For conditional jump instructions, the number of instructions
|
||||
// to skip if the condition is true/false.
|
||||
Jt uint8
|
||||
Jf uint8
|
||||
// Constant parameter. The meaning depends on the Op.
|
||||
K uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
|
||||
|
||||
// Disassemble parses ri into an Instruction and returns it. If ri is
|
||||
// not recognized by this package, ri itself is returned.
|
||||
func (ri RawInstruction) Disassemble() Instruction {
|
||||
switch ri.Op & opMaskCls {
|
||||
case opClsLoadA, opClsLoadX:
|
||||
reg := Register(ri.Op & opMaskLoadDest)
|
||||
sz := 0
|
||||
switch ri.Op & opMaskLoadWidth {
|
||||
case opLoadWidth4:
|
||||
sz = 4
|
||||
case opLoadWidth2:
|
||||
sz = 2
|
||||
case opLoadWidth1:
|
||||
sz = 1
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
switch ri.Op & opMaskLoadMode {
|
||||
case opAddrModeImmediate:
|
||||
if sz != 4 {
|
||||
return ri
|
||||
}
|
||||
return LoadConstant{Dst: reg, Val: ri.K}
|
||||
case opAddrModeScratch:
|
||||
if sz != 4 || ri.K > 15 {
|
||||
return ri
|
||||
}
|
||||
return LoadScratch{Dst: reg, N: int(ri.K)}
|
||||
case opAddrModeAbsolute:
|
||||
if ri.K > extOffset+0xffffffff {
|
||||
return LoadExtension{Num: Extension(-extOffset + ri.K)}
|
||||
}
|
||||
return LoadAbsolute{Size: sz, Off: ri.K}
|
||||
case opAddrModeIndirect:
|
||||
return LoadIndirect{Size: sz, Off: ri.K}
|
||||
case opAddrModePacketLen:
|
||||
if sz != 4 {
|
||||
return ri
|
||||
}
|
||||
return LoadExtension{Num: ExtLen}
|
||||
case opAddrModeMemShift:
|
||||
return LoadMemShift{Off: ri.K}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsStoreA:
|
||||
if ri.Op != opClsStoreA || ri.K > 15 {
|
||||
return ri
|
||||
}
|
||||
return StoreScratch{Src: RegA, N: int(ri.K)}
|
||||
|
||||
case opClsStoreX:
|
||||
if ri.Op != opClsStoreX || ri.K > 15 {
|
||||
return ri
|
||||
}
|
||||
return StoreScratch{Src: RegX, N: int(ri.K)}
|
||||
|
||||
case opClsALU:
|
||||
switch op := ALUOp(ri.Op & opMaskOperator); op {
|
||||
case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
|
||||
switch operand := opOperand(ri.Op & opMaskOperand); operand {
|
||||
case opOperandX:
|
||||
return ALUOpX{Op: op}
|
||||
case opOperandConstant:
|
||||
return ALUOpConstant{Op: op, Val: ri.K}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
case aluOpNeg:
|
||||
return NegateA{}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsJump:
|
||||
switch op := jumpOp(ri.Op & opMaskOperator); op {
|
||||
case opJumpAlways:
|
||||
return Jump{Skip: ri.K}
|
||||
case opJumpEqual, opJumpGT, opJumpGE, opJumpSet:
|
||||
cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf)
|
||||
switch operand := opOperand(ri.Op & opMaskOperand); operand {
|
||||
case opOperandX:
|
||||
return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse}
|
||||
case opOperandConstant:
|
||||
return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsReturn:
|
||||
switch ri.Op {
|
||||
case opClsReturn | opRetSrcA:
|
||||
return RetA{}
|
||||
case opClsReturn | opRetSrcConstant:
|
||||
return RetConstant{Val: ri.K}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsMisc:
|
||||
switch ri.Op {
|
||||
case opClsMisc | opMiscTAX:
|
||||
return TAX{}
|
||||
case opClsMisc | opMiscTXA:
|
||||
return TXA{}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
default:
|
||||
panic("unreachable") // switch is exhaustive on the bit pattern
|
||||
}
|
||||
}
|
||||
|
||||
func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) {
|
||||
var test JumpTest
|
||||
|
||||
// Decode "fake" jump conditions that don't appear in machine code
|
||||
// Ensures the Assemble -> Disassemble stage recreates the same instructions
|
||||
// See https://github.com/golang/go/issues/18470
|
||||
if skipTrue == 0 {
|
||||
switch op {
|
||||
case opJumpEqual:
|
||||
test = JumpNotEqual
|
||||
case opJumpGT:
|
||||
test = JumpLessOrEqual
|
||||
case opJumpGE:
|
||||
test = JumpLessThan
|
||||
case opJumpSet:
|
||||
test = JumpBitsNotSet
|
||||
}
|
||||
|
||||
return test, skipFalse, 0
|
||||
}
|
||||
|
||||
switch op {
|
||||
case opJumpEqual:
|
||||
test = JumpEqual
|
||||
case opJumpGT:
|
||||
test = JumpGreaterThan
|
||||
case opJumpGE:
|
||||
test = JumpGreaterOrEqual
|
||||
case opJumpSet:
|
||||
test = JumpBitsSet
|
||||
}
|
||||
|
||||
return test, skipTrue, skipFalse
|
||||
}
|
||||
|
||||
// LoadConstant loads Val into register Dst.
|
||||
type LoadConstant struct {
|
||||
Dst Register
|
||||
Val uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadConstant) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadConstant) String() string {
|
||||
switch a.Dst {
|
||||
case RegA:
|
||||
return fmt.Sprintf("ld #%d", a.Val)
|
||||
case RegX:
|
||||
return fmt.Sprintf("ldx #%d", a.Val)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadScratch loads scratch[N] into register Dst.
|
||||
type LoadScratch struct {
|
||||
Dst Register
|
||||
N int // 0-15
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadScratch) Assemble() (RawInstruction, error) {
|
||||
if a.N < 0 || a.N > 15 {
|
||||
return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
|
||||
}
|
||||
return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadScratch) String() string {
|
||||
switch a.Dst {
|
||||
case RegA:
|
||||
return fmt.Sprintf("ld M[%d]", a.N)
|
||||
case RegX:
|
||||
return fmt.Sprintf("ldx M[%d]", a.N)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
|
||||
// register A.
|
||||
type LoadAbsolute struct {
|
||||
Off uint32
|
||||
Size int // 1, 2 or 4
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadAbsolute) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadAbsolute) String() string {
|
||||
switch a.Size {
|
||||
case 1: // byte
|
||||
return fmt.Sprintf("ldb [%d]", a.Off)
|
||||
case 2: // half word
|
||||
return fmt.Sprintf("ldh [%d]", a.Off)
|
||||
case 4: // word
|
||||
if a.Off > extOffset+0xffffffff {
|
||||
return LoadExtension{Num: Extension(a.Off + 0x1000)}.String()
|
||||
}
|
||||
return fmt.Sprintf("ld [%d]", a.Off)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
|
||||
// into register A.
|
||||
type LoadIndirect struct {
|
||||
Off uint32
|
||||
Size int // 1, 2 or 4
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadIndirect) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadIndirect) String() string {
|
||||
switch a.Size {
|
||||
case 1: // byte
|
||||
return fmt.Sprintf("ldb [x + %d]", a.Off)
|
||||
case 2: // half word
|
||||
return fmt.Sprintf("ldh [x + %d]", a.Off)
|
||||
case 4: // word
|
||||
return fmt.Sprintf("ld [x + %d]", a.Off)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadMemShift multiplies the first 4 bits of the byte at packet[Off]
|
||||
// by 4 and stores the result in register X.
|
||||
//
|
||||
// This instruction is mainly useful to load into X the length of an
|
||||
// IPv4 packet header in a single instruction, rather than have to do
|
||||
// the arithmetic on the header's first byte by hand.
|
||||
type LoadMemShift struct {
|
||||
Off uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadMemShift) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadMemShift) String() string {
|
||||
return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
|
||||
}
|
||||
|
||||
// LoadExtension invokes a linux-specific extension and stores the
|
||||
// result in register A.
|
||||
type LoadExtension struct {
|
||||
Num Extension
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadExtension) Assemble() (RawInstruction, error) {
|
||||
if a.Num == ExtLen {
|
||||
return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
|
||||
}
|
||||
return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadExtension) String() string {
|
||||
switch a.Num {
|
||||
case ExtLen:
|
||||
return "ld #len"
|
||||
case ExtProto:
|
||||
return "ld #proto"
|
||||
case ExtType:
|
||||
return "ld #type"
|
||||
case ExtPayloadOffset:
|
||||
return "ld #poff"
|
||||
case ExtInterfaceIndex:
|
||||
return "ld #ifidx"
|
||||
case ExtNetlinkAttr:
|
||||
return "ld #nla"
|
||||
case ExtNetlinkAttrNested:
|
||||
return "ld #nlan"
|
||||
case ExtMark:
|
||||
return "ld #mark"
|
||||
case ExtQueue:
|
||||
return "ld #queue"
|
||||
case ExtLinkLayerType:
|
||||
return "ld #hatype"
|
||||
case ExtRXHash:
|
||||
return "ld #rxhash"
|
||||
case ExtCPUID:
|
||||
return "ld #cpu"
|
||||
case ExtVLANTag:
|
||||
return "ld #vlan_tci"
|
||||
case ExtVLANTagPresent:
|
||||
return "ld #vlan_avail"
|
||||
case ExtVLANProto:
|
||||
return "ld #vlan_tpid"
|
||||
case ExtRand:
|
||||
return "ld #rand"
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// StoreScratch stores register Src into scratch[N].
|
||||
type StoreScratch struct {
|
||||
Src Register
|
||||
N int // 0-15
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a StoreScratch) Assemble() (RawInstruction, error) {
|
||||
if a.N < 0 || a.N > 15 {
|
||||
return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
|
||||
}
|
||||
var op uint16
|
||||
switch a.Src {
|
||||
case RegA:
|
||||
op = opClsStoreA
|
||||
case RegX:
|
||||
op = opClsStoreX
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
|
||||
}
|
||||
|
||||
return RawInstruction{
|
||||
Op: op,
|
||||
K: uint32(a.N),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a StoreScratch) String() string {
|
||||
switch a.Src {
|
||||
case RegA:
|
||||
return fmt.Sprintf("st M[%d]", a.N)
|
||||
case RegX:
|
||||
return fmt.Sprintf("stx M[%d]", a.N)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// ALUOpConstant executes A = A <Op> Val.
|
||||
type ALUOpConstant struct {
|
||||
Op ALUOp
|
||||
Val uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a ALUOpConstant) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op),
|
||||
K: a.Val,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a ALUOpConstant) String() string {
|
||||
switch a.Op {
|
||||
case ALUOpAdd:
|
||||
return fmt.Sprintf("add #%d", a.Val)
|
||||
case ALUOpSub:
|
||||
return fmt.Sprintf("sub #%d", a.Val)
|
||||
case ALUOpMul:
|
||||
return fmt.Sprintf("mul #%d", a.Val)
|
||||
case ALUOpDiv:
|
||||
return fmt.Sprintf("div #%d", a.Val)
|
||||
case ALUOpMod:
|
||||
return fmt.Sprintf("mod #%d", a.Val)
|
||||
case ALUOpAnd:
|
||||
return fmt.Sprintf("and #%d", a.Val)
|
||||
case ALUOpOr:
|
||||
return fmt.Sprintf("or #%d", a.Val)
|
||||
case ALUOpXor:
|
||||
return fmt.Sprintf("xor #%d", a.Val)
|
||||
case ALUOpShiftLeft:
|
||||
return fmt.Sprintf("lsh #%d", a.Val)
|
||||
case ALUOpShiftRight:
|
||||
return fmt.Sprintf("rsh #%d", a.Val)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// ALUOpX executes A = A <Op> X
|
||||
type ALUOpX struct {
|
||||
Op ALUOp
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a ALUOpX) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsALU | uint16(opOperandX) | uint16(a.Op),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a ALUOpX) String() string {
|
||||
switch a.Op {
|
||||
case ALUOpAdd:
|
||||
return "add x"
|
||||
case ALUOpSub:
|
||||
return "sub x"
|
||||
case ALUOpMul:
|
||||
return "mul x"
|
||||
case ALUOpDiv:
|
||||
return "div x"
|
||||
case ALUOpMod:
|
||||
return "mod x"
|
||||
case ALUOpAnd:
|
||||
return "and x"
|
||||
case ALUOpOr:
|
||||
return "or x"
|
||||
case ALUOpXor:
|
||||
return "xor x"
|
||||
case ALUOpShiftLeft:
|
||||
return "lsh x"
|
||||
case ALUOpShiftRight:
|
||||
return "rsh x"
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// NegateA executes A = -A.
|
||||
type NegateA struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a NegateA) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsALU | uint16(aluOpNeg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a NegateA) String() string {
|
||||
return fmt.Sprintf("neg")
|
||||
}
|
||||
|
||||
// Jump skips the following Skip instructions in the program.
|
||||
type Jump struct {
|
||||
Skip uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a Jump) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsJump | uint16(opJumpAlways),
|
||||
K: a.Skip,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a Jump) String() string {
|
||||
return fmt.Sprintf("ja %d", a.Skip)
|
||||
}
|
||||
|
||||
// JumpIf skips the following Skip instructions in the program if A
|
||||
// <Cond> Val is true.
|
||||
type JumpIf struct {
|
||||
Cond JumpTest
|
||||
Val uint32
|
||||
SkipTrue uint8
|
||||
SkipFalse uint8
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a JumpIf) Assemble() (RawInstruction, error) {
|
||||
return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a JumpIf) String() string {
|
||||
return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// JumpIfX skips the following Skip instructions in the program if A
|
||||
// <Cond> X is true.
|
||||
type JumpIfX struct {
|
||||
Cond JumpTest
|
||||
SkipTrue uint8
|
||||
SkipFalse uint8
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a JumpIfX) Assemble() (RawInstruction, error) {
|
||||
return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a JumpIfX) String() string {
|
||||
return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// jumpToRaw assembles a jump instruction into a RawInstruction
|
||||
func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) {
|
||||
var (
|
||||
cond jumpOp
|
||||
flip bool
|
||||
)
|
||||
switch test {
|
||||
case JumpEqual:
|
||||
cond = opJumpEqual
|
||||
case JumpNotEqual:
|
||||
cond, flip = opJumpEqual, true
|
||||
case JumpGreaterThan:
|
||||
cond = opJumpGT
|
||||
case JumpLessThan:
|
||||
cond, flip = opJumpGE, true
|
||||
case JumpGreaterOrEqual:
|
||||
cond = opJumpGE
|
||||
case JumpLessOrEqual:
|
||||
cond, flip = opJumpGT, true
|
||||
case JumpBitsSet:
|
||||
cond = opJumpSet
|
||||
case JumpBitsNotSet:
|
||||
cond, flip = opJumpSet, true
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test)
|
||||
}
|
||||
jt, jf := skipTrue, skipFalse
|
||||
if flip {
|
||||
jt, jf = jf, jt
|
||||
}
|
||||
return RawInstruction{
|
||||
Op: opClsJump | uint16(cond) | uint16(operand),
|
||||
Jt: jt,
|
||||
Jf: jf,
|
||||
K: k,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// jumpToString converts a jump instruction to assembler notation
|
||||
func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string {
|
||||
switch cond {
|
||||
// K == A
|
||||
case JumpEqual:
|
||||
return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq")
|
||||
// K != A
|
||||
case JumpNotEqual:
|
||||
return fmt.Sprintf("jneq %s,%d", operand, skipTrue)
|
||||
// K > A
|
||||
case JumpGreaterThan:
|
||||
return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle")
|
||||
// K < A
|
||||
case JumpLessThan:
|
||||
return fmt.Sprintf("jlt %s,%d", operand, skipTrue)
|
||||
// K >= A
|
||||
case JumpGreaterOrEqual:
|
||||
return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt")
|
||||
// K <= A
|
||||
case JumpLessOrEqual:
|
||||
return fmt.Sprintf("jle %s,%d", operand, skipTrue)
|
||||
// K & A != 0
|
||||
case JumpBitsSet:
|
||||
if skipFalse > 0 {
|
||||
return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse)
|
||||
}
|
||||
return fmt.Sprintf("jset %s,%d", operand, skipTrue)
|
||||
// K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips
|
||||
case JumpBitsNotSet:
|
||||
return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue)
|
||||
default:
|
||||
return fmt.Sprintf("unknown JumpTest %#v", cond)
|
||||
}
|
||||
}
|
||||
|
||||
func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string {
|
||||
if skipTrue > 0 {
|
||||
if skipFalse > 0 {
|
||||
return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse)
|
||||
}
|
||||
return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue)
|
||||
}
|
||||
return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse)
|
||||
}
|
||||
|
||||
// RetA exits the BPF program, returning the value of register A.
|
||||
type RetA struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a RetA) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsReturn | opRetSrcA,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a RetA) String() string {
|
||||
return fmt.Sprintf("ret a")
|
||||
}
|
||||
|
||||
// RetConstant exits the BPF program, returning a constant value.
|
||||
type RetConstant struct {
|
||||
Val uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a RetConstant) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsReturn | opRetSrcConstant,
|
||||
K: a.Val,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a RetConstant) String() string {
|
||||
return fmt.Sprintf("ret #%d", a.Val)
|
||||
}
|
||||
|
||||
// TXA copies the value of register X to register A.
|
||||
type TXA struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a TXA) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsMisc | opMiscTXA,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a TXA) String() string {
|
||||
return fmt.Sprintf("txa")
|
||||
}
|
||||
|
||||
// TAX copies the value of register A to register X.
|
||||
type TAX struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a TAX) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsMisc | opMiscTAX,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a TAX) String() string {
|
||||
return fmt.Sprintf("tax")
|
||||
}
|
||||
|
||||
func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
|
||||
var (
|
||||
cls uint16
|
||||
sz uint16
|
||||
)
|
||||
switch dst {
|
||||
case RegA:
|
||||
cls = opClsLoadA
|
||||
case RegX:
|
||||
cls = opClsLoadX
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
|
||||
}
|
||||
switch loadSize {
|
||||
case 1:
|
||||
sz = opLoadWidth1
|
||||
case 2:
|
||||
sz = opLoadWidth2
|
||||
case 4:
|
||||
sz = opLoadWidth4
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz)
|
||||
}
|
||||
return RawInstruction{
|
||||
Op: cls | sz | mode,
|
||||
K: k,
|
||||
}, nil
|
||||
}
|
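As a quick illustration of how the instruction types above fit together, here is a minimal sketch (not part of the vendored file; program contents are illustrative only) that builds a tiny filter and assembles it with the package's exported Assemble function:

	package main

	import (
		"fmt"

		"golang.org/x/net/bpf"
	)

	func main() {
		// Accept (return 64 bytes of) any packet whose first byte is 0x45,
		// drop everything else. Offsets and values are illustrative.
		prog := []bpf.Instruction{
			bpf.LoadAbsolute{Off: 0, Size: 1},
			bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x45, SkipFalse: 1},
			bpf.RetConstant{Val: 64},
			bpf.RetConstant{Val: 0},
		}
		raw, err := bpf.Assemble(prog)
		if err != nil {
			panic(err)
		}
		fmt.Println(raw) // each Instruction assembles into one RawInstruction
	}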
10
vendor/golang.org/x/net/bpf/setter.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

// A Setter is a type which can attach a compiled BPF filter to itself.
type Setter interface {
	SetBPF(filter []RawInstruction) error
}
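For context, a type satisfies Setter simply by exposing SetBPF. A minimal, hypothetical sketch (the filteredConn type below is invented for illustration and is not part of the vendored code):

	// filteredConn is a hypothetical connection type that records the filter.
	type filteredConn struct {
		filter []bpf.RawInstruction
	}

	// SetBPF makes filteredConn satisfy bpf.Setter. A real implementation
	// would attach the raw instructions to a socket instead of storing them.
	func (c *filteredConn) SetBPF(filter []bpf.RawInstruction) error {
		c.filter = filter
		return nil
	}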
150
vendor/golang.org/x/net/bpf/vm.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A VM is an emulated BPF virtual machine.
|
||||
type VM struct {
|
||||
filter []Instruction
|
||||
}
|
||||
|
||||
// NewVM returns a new VM using the input BPF program.
|
||||
func NewVM(filter []Instruction) (*VM, error) {
|
||||
if len(filter) == 0 {
|
||||
return nil, errors.New("one or more Instructions must be specified")
|
||||
}
|
||||
|
||||
for i, ins := range filter {
|
||||
check := len(filter) - (i + 1)
|
||||
switch ins := ins.(type) {
|
||||
// Check for out-of-bounds jumps in instructions
|
||||
case Jump:
|
||||
if check <= int(ins.Skip) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
|
||||
}
|
||||
case JumpIf:
|
||||
if check <= int(ins.SkipTrue) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
|
||||
}
|
||||
if check <= int(ins.SkipFalse) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
|
||||
}
|
||||
case JumpIfX:
|
||||
if check <= int(ins.SkipTrue) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
|
||||
}
|
||||
if check <= int(ins.SkipFalse) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
|
||||
}
|
||||
// Check for division or modulus by zero
|
||||
case ALUOpConstant:
|
||||
if ins.Val != 0 {
|
||||
break
|
||||
}
|
||||
|
||||
switch ins.Op {
|
||||
case ALUOpDiv, ALUOpMod:
|
||||
return nil, errors.New("cannot divide by zero using ALUOpConstant")
|
||||
}
|
||||
// Check for unknown extensions
|
||||
case LoadExtension:
|
||||
switch ins.Num {
|
||||
case ExtLen:
|
||||
default:
|
||||
return nil, fmt.Errorf("extension %d not implemented", ins.Num)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure last instruction is a return instruction
|
||||
switch filter[len(filter)-1].(type) {
|
||||
case RetA, RetConstant:
|
||||
default:
|
||||
return nil, errors.New("BPF program must end with RetA or RetConstant")
|
||||
}
|
||||
|
||||
// Though our VM works using disassembled instructions, we
|
||||
// attempt to assemble the input filter anyway to ensure it is compatible
|
||||
// with an operating system VM.
|
||||
_, err := Assemble(filter)
|
||||
|
||||
return &VM{
|
||||
filter: filter,
|
||||
}, err
|
||||
}
|
||||
|
||||
// Run runs the VM's BPF program against the input bytes.
|
||||
// Run returns the number of bytes accepted by the BPF program, and any errors
|
||||
// which occurred while processing the program.
|
||||
func (v *VM) Run(in []byte) (int, error) {
|
||||
var (
|
||||
// Registers of the virtual machine
|
||||
regA uint32
|
||||
regX uint32
|
||||
regScratch [16]uint32
|
||||
|
||||
// OK is true if the program should continue processing the next
|
||||
// instruction, or false if not, causing the loop to break
|
||||
ok = true
|
||||
)
|
||||
|
||||
// TODO(mdlayher): implement:
|
||||
// - NegateA:
|
||||
// - would require a change from uint32 registers to int32
|
||||
// registers
|
||||
|
||||
// TODO(mdlayher): add interop tests that check signedness of ALU
|
||||
// operations against kernel implementation, and make sure Go
|
||||
// implementation matches behavior
|
||||
|
||||
for i := 0; i < len(v.filter) && ok; i++ {
|
||||
ins := v.filter[i]
|
||||
|
||||
switch ins := ins.(type) {
|
||||
case ALUOpConstant:
|
||||
regA = aluOpConstant(ins, regA)
|
||||
case ALUOpX:
|
||||
regA, ok = aluOpX(ins, regA, regX)
|
||||
case Jump:
|
||||
i += int(ins.Skip)
|
||||
case JumpIf:
|
||||
jump := jumpIf(ins, regA)
|
||||
i += jump
|
||||
case JumpIfX:
|
||||
jump := jumpIfX(ins, regA, regX)
|
||||
i += jump
|
||||
case LoadAbsolute:
|
||||
regA, ok = loadAbsolute(ins, in)
|
||||
case LoadConstant:
|
||||
regA, regX = loadConstant(ins, regA, regX)
|
||||
case LoadExtension:
|
||||
regA = loadExtension(ins, in)
|
||||
case LoadIndirect:
|
||||
regA, ok = loadIndirect(ins, in, regX)
|
||||
case LoadMemShift:
|
||||
regX, ok = loadMemShift(ins, in)
|
||||
case LoadScratch:
|
||||
regA, regX = loadScratch(ins, regScratch, regA, regX)
|
||||
case RetA:
|
||||
return int(regA), nil
|
||||
case RetConstant:
|
||||
return int(ins.Val), nil
|
||||
case StoreScratch:
|
||||
regScratch = storeScratch(ins, regScratch, regA, regX)
|
||||
case TAX:
|
||||
regX = regA
|
||||
case TXA:
|
||||
regA = regX
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, nil
|
||||
}
|
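A minimal usage sketch of the VM above (the program and input are illustrative, not part of the vendored file):

	func runFilter(pkt []byte) (int, error) {
		vm, err := bpf.NewVM([]bpf.Instruction{
			bpf.LoadAbsolute{Off: 0, Size: 1},                        // A = pkt[0]
			bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0xff, SkipFalse: 1}, // if A == 0xff ...
			bpf.RetConstant{Val: 4},                                  // ... accept 4 bytes
			bpf.RetConstant{Val: 0},                                  // else accept nothing
		})
		if err != nil {
			return 0, err
		}
		return vm.Run(pkt) // e.g. pkt = []byte{0xff, 1, 2, 3} yields 4
	}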
182
vendor/golang.org/x/net/bpf/vm_instructions.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
|
||||
return aluOpCommon(ins.Op, regA, ins.Val)
|
||||
}
|
||||
|
||||
func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
|
||||
// Guard against division or modulus by zero by terminating
|
||||
// the program, as the OS BPF VM does
|
||||
if regX == 0 {
|
||||
switch ins.Op {
|
||||
case ALUOpDiv, ALUOpMod:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
return aluOpCommon(ins.Op, regA, regX), true
|
||||
}
|
||||
|
||||
func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
|
||||
switch op {
|
||||
case ALUOpAdd:
|
||||
return regA + value
|
||||
case ALUOpSub:
|
||||
return regA - value
|
||||
case ALUOpMul:
|
||||
return regA * value
|
||||
case ALUOpDiv:
|
||||
// Division by zero not permitted by NewVM and aluOpX checks
|
||||
return regA / value
|
||||
case ALUOpOr:
|
||||
return regA | value
|
||||
case ALUOpAnd:
|
||||
return regA & value
|
||||
case ALUOpShiftLeft:
|
||||
return regA << value
|
||||
case ALUOpShiftRight:
|
||||
return regA >> value
|
||||
case ALUOpMod:
|
||||
// Modulus by zero not permitted by NewVM and aluOpX checks
|
||||
return regA % value
|
||||
case ALUOpXor:
|
||||
return regA ^ value
|
||||
default:
|
||||
return regA
|
||||
}
|
||||
}
|
||||
|
||||
func jumpIf(ins JumpIf, regA uint32) int {
|
||||
return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val)
|
||||
}
|
||||
|
||||
func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int {
|
||||
return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX)
|
||||
}
|
||||
|
||||
func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int {
|
||||
var ok bool
|
||||
|
||||
switch cond {
|
||||
case JumpEqual:
|
||||
ok = regA == value
|
||||
case JumpNotEqual:
|
||||
ok = regA != value
|
||||
case JumpGreaterThan:
|
||||
ok = regA > value
|
||||
case JumpLessThan:
|
||||
ok = regA < value
|
||||
case JumpGreaterOrEqual:
|
||||
ok = regA >= value
|
||||
case JumpLessOrEqual:
|
||||
ok = regA <= value
|
||||
case JumpBitsSet:
|
||||
ok = (regA & value) != 0
|
||||
case JumpBitsNotSet:
|
||||
ok = (regA & value) == 0
|
||||
}
|
||||
|
||||
if ok {
|
||||
return int(skipTrue)
|
||||
}
|
||||
|
||||
return int(skipFalse)
|
||||
}
|
||||
|
||||
func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
|
||||
offset := int(ins.Off)
|
||||
size := int(ins.Size)
|
||||
|
||||
return loadCommon(in, offset, size)
|
||||
}
|
||||
|
||||
func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
|
||||
switch ins.Dst {
|
||||
case RegA:
|
||||
regA = ins.Val
|
||||
case RegX:
|
||||
regX = ins.Val
|
||||
}
|
||||
|
||||
return regA, regX
|
||||
}
|
||||
|
||||
func loadExtension(ins LoadExtension, in []byte) uint32 {
|
||||
switch ins.Num {
|
||||
case ExtLen:
|
||||
return uint32(len(in))
|
||||
default:
|
||||
panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
|
||||
}
|
||||
}
|
||||
|
||||
func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
|
||||
offset := int(ins.Off) + int(regX)
|
||||
size := int(ins.Size)
|
||||
|
||||
return loadCommon(in, offset, size)
|
||||
}
|
||||
|
||||
func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
|
||||
offset := int(ins.Off)
|
||||
|
||||
// Size of LoadMemShift is always 1 byte
|
||||
if !inBounds(len(in), offset, 1) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Mask off high 4 bits and multiply low 4 bits by 4
|
||||
return uint32(in[offset]&0x0f) * 4, true
|
||||
}
|
||||
|
||||
func inBounds(inLen int, offset int, size int) bool {
|
||||
return offset+size <= inLen
|
||||
}
|
||||
|
||||
func loadCommon(in []byte, offset int, size int) (uint32, bool) {
|
||||
if !inBounds(len(in), offset, size) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
switch size {
|
||||
case 1:
|
||||
return uint32(in[offset]), true
|
||||
case 2:
|
||||
return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
|
||||
case 4:
|
||||
return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid load size: %d", size))
|
||||
}
|
||||
}
|
||||
|
||||
func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
|
||||
switch ins.Dst {
|
||||
case RegA:
|
||||
regA = regScratch[ins.N]
|
||||
case RegX:
|
||||
regX = regScratch[ins.N]
|
||||
}
|
||||
|
||||
return regA, regX
|
||||
}
|
||||
|
||||
func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
|
||||
switch ins.Src {
|
||||
case RegA:
|
||||
regScratch[ins.N] = regA
|
||||
case RegX:
|
||||
regScratch[ins.N] = regX
|
||||
}
|
||||
|
||||
return regScratch
|
||||
}
|
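As the loadMemShift helper above shows, LoadMemShift computes 4 * (in[Off] & 0x0f), which is the classic way to load an IPv4 header length (IHL * 4) into register X. A hedged sketch, assuming Ethernet framing so the IP header starts at offset 14 (not part of the vendored file):

	// prog loads the IPv4 header length into X, then reads the first two bytes
	// of the transport header (for example a TCP/UDP source port) into A.
	var prog = []bpf.Instruction{
		bpf.LoadMemShift{Off: 14},          // X = 4 * (pkt[14] & 0x0f): IHL in bytes
		bpf.LoadIndirect{Off: 14, Size: 2}, // A = 16-bit field at offset X+14
		bpf.RetA{},                         // return A
	}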
56
vendor/golang.org/x/net/context/context.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package context defines the Context type, which carries deadlines,
|
||||
// cancelation signals, and other request-scoped values across API boundaries
|
||||
// and between processes.
|
||||
// As of Go 1.7 this package is available in the standard library under the
|
||||
// name context. https://golang.org/pkg/context.
|
||||
//
|
||||
// Incoming requests to a server should create a Context, and outgoing calls to
|
||||
// servers should accept a Context. The chain of function calls between must
|
||||
// propagate the Context, optionally replacing it with a modified copy created
|
||||
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
|
||||
//
|
||||
// Programs that use Contexts should follow these rules to keep interfaces
|
||||
// consistent across packages and enable static analysis tools to check context
|
||||
// propagation:
|
||||
//
|
||||
// Do not store Contexts inside a struct type; instead, pass a Context
|
||||
// explicitly to each function that needs it. The Context should be the first
|
||||
// parameter, typically named ctx:
|
||||
//
|
||||
// func DoSomething(ctx context.Context, arg Arg) error {
|
||||
// // ... use ctx ...
|
||||
// }
|
||||
//
|
||||
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
|
||||
// if you are unsure about which Context to use.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
//
|
||||
// The same Context may be passed to functions running in different goroutines;
|
||||
// Contexts are safe for simultaneous use by multiple goroutines.
|
||||
//
|
||||
// See http://blog.golang.org/context for example code for a server that uses
|
||||
// Contexts.
|
||||
package context // import "golang.org/x/net/context"
|
||||
|
||||
// Background returns a non-nil, empty Context. It is never canceled, has no
|
||||
// values, and has no deadline. It is typically used by the main function,
|
||||
// initialization, and tests, and as the top-level Context for incoming
|
||||
// requests.
|
||||
func Background() Context {
|
||||
return background
|
||||
}
|
||||
|
||||
// TODO returns a non-nil, empty Context. Code should use context.TODO when
|
||||
// it's unclear which Context to use or it is not yet available (because the
|
||||
// surrounding function has not yet been extended to accept a Context
|
||||
// parameter). TODO is recognized by static analysis tools that determine
|
||||
// whether Contexts are propagated correctly in a program.
|
||||
func TODO() Context {
|
||||
return todo
|
||||
}
|
71
vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
|
||||
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Do sends an HTTP request with the provided http.Client and returns
|
||||
// an HTTP response.
|
||||
//
|
||||
// If the client is nil, http.DefaultClient is used.
|
||||
//
|
||||
// The provided ctx must be non-nil. If it is canceled or times out,
|
||||
// ctx.Err() will be returned.
|
||||
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
|
||||
if client == nil {
|
||||
client = http.DefaultClient
|
||||
}
|
||||
resp, err := client.Do(req.WithContext(ctx))
|
||||
// If we got an error, and the context has been canceled,
|
||||
// the context's error is probably more useful.
|
||||
if err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
default:
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Get issues a GET request via the Do function.
|
||||
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Do(ctx, client, req)
|
||||
}
|
||||
|
||||
// Head issues a HEAD request via the Do function.
|
||||
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
||||
req, err := http.NewRequest("HEAD", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Do(ctx, client, req)
|
||||
}
|
||||
|
||||
// Post issues a POST request via the Do function.
|
||||
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
|
||||
req, err := http.NewRequest("POST", url, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", bodyType)
|
||||
return Do(ctx, client, req)
|
||||
}
|
||||
|
||||
// PostForm issues a POST request via the Do function.
|
||||
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
|
||||
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
||||
}
|
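A minimal usage sketch of the helpers above (the URL and timeout are illustrative; the standard context and time packages are assumed to be imported):

	func fetch(ctx context.Context) error {
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()

		// A nil client means http.DefaultClient is used.
		resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
		if err != nil {
			// If ctx timed out or was canceled, err is ctx.Err().
			return err
		}
		defer resp.Body.Close()
		return nil
	}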
72
vendor/golang.org/x/net/context/go17.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"context" // standard library's context, as of Go 1.7
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
todo = context.TODO()
|
||||
background = context.Background()
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = context.Canceled
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = context.DeadlineExceeded
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
ctx, f := context.WithCancel(parent)
|
||||
return ctx, CancelFunc(f)
|
||||
}
|
||||
|
||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
||||
// context's Done channel is closed when the deadline expires, when the returned
|
||||
// cancel function is called, or when the parent context's Done channel is
|
||||
// closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
||||
ctx, f := context.WithDeadline(parent, deadline)
|
||||
return ctx, CancelFunc(f)
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return WithDeadline(parent, time.Now().Add(timeout))
|
||||
}
|
||||
|
||||
// WithValue returns a copy of parent in which the value associated with key is
|
||||
// val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
||||
return context.WithValue(parent, key, val)
|
||||
}
|
20
vendor/golang.org/x/net/context/go19.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.9

package context

import "context" // standard library's context, as of Go 1.7

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
300
vendor/golang.org/x/net/context/pre_go17.go
generated
vendored
Normal file
@ -0,0 +1,300 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
||||
// struct{}, since vars of this type must have distinct addresses.
|
||||
type emptyCtx int
|
||||
|
||||
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func (*emptyCtx) Done() <-chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Err() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Value(key interface{}) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *emptyCtx) String() string {
|
||||
switch e {
|
||||
case background:
|
||||
return "context.Background"
|
||||
case todo:
|
||||
return "context.TODO"
|
||||
}
|
||||
return "unknown empty Context"
|
||||
}
|
||||
|
||||
var (
|
||||
background = new(emptyCtx)
|
||||
todo = new(emptyCtx)
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = errors.New("context canceled")
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = errors.New("context deadline exceeded")
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
c := newCancelCtx(parent)
|
||||
propagateCancel(parent, c)
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// newCancelCtx returns an initialized cancelCtx.
|
||||
func newCancelCtx(parent Context) *cancelCtx {
|
||||
return &cancelCtx{
|
||||
Context: parent,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// propagateCancel arranges for child to be canceled when parent is.
|
||||
func propagateCancel(parent Context, child canceler) {
|
||||
if parent.Done() == nil {
|
||||
return // parent is never canceled
|
||||
}
|
||||
if p, ok := parentCancelCtx(parent); ok {
|
||||
p.mu.Lock()
|
||||
if p.err != nil {
|
||||
// parent has already been canceled
|
||||
child.cancel(false, p.err)
|
||||
} else {
|
||||
if p.children == nil {
|
||||
p.children = make(map[canceler]bool)
|
||||
}
|
||||
p.children[child] = true
|
||||
}
|
||||
p.mu.Unlock()
|
||||
} else {
|
||||
go func() {
|
||||
select {
|
||||
case <-parent.Done():
|
||||
child.cancel(false, parent.Err())
|
||||
case <-child.Done():
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// parentCancelCtx follows a chain of parent references until it finds a
|
||||
// *cancelCtx. This function understands how each of the concrete types in this
|
||||
// package represents its parent.
|
||||
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
|
||||
for {
|
||||
switch c := parent.(type) {
|
||||
case *cancelCtx:
|
||||
return c, true
|
||||
case *timerCtx:
|
||||
return c.cancelCtx, true
|
||||
case *valueCtx:
|
||||
parent = c.Context
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeChild removes a context from its parent.
|
||||
func removeChild(parent Context, child canceler) {
|
||||
p, ok := parentCancelCtx(parent)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
p.mu.Lock()
|
||||
if p.children != nil {
|
||||
delete(p.children, child)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// A canceler is a context type that can be canceled directly. The
|
||||
// implementations are *cancelCtx and *timerCtx.
|
||||
type canceler interface {
|
||||
cancel(removeFromParent bool, err error)
|
||||
Done() <-chan struct{}
|
||||
}
|
||||
|
||||
// A cancelCtx can be canceled. When canceled, it also cancels any children
|
||||
// that implement canceler.
|
||||
type cancelCtx struct {
|
||||
Context
|
||||
|
||||
done chan struct{} // closed by the first cancel call.
|
||||
|
||||
mu sync.Mutex
|
||||
children map[canceler]bool // set to nil by the first cancel call
|
||||
err error // set to non-nil by the first cancel call
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Done() <-chan struct{} {
|
||||
return c.done
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Err() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.err
|
||||
}
|
||||
|
||||
func (c *cancelCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithCancel", c.Context)
|
||||
}
|
||||
|
||||
// cancel closes c.done, cancels each of c's children, and, if
|
||||
// removeFromParent is true, removes c from its parent's children.
|
||||
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
|
||||
if err == nil {
|
||||
panic("context: internal error: missing cancel error")
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return // already canceled
|
||||
}
|
||||
c.err = err
|
||||
close(c.done)
|
||||
for child := range c.children {
|
||||
// NOTE: acquiring the child's lock while holding parent's lock.
|
||||
child.cancel(false, err)
|
||||
}
|
||||
c.children = nil
|
||||
c.mu.Unlock()
|
||||
|
||||
if removeFromParent {
|
||||
removeChild(c.Context, c)
|
||||
}
|
||||
}
|
||||
|
||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
||||
// context's Done channel is closed when the deadline expires, when the returned
|
||||
// cancel function is called, or when the parent context's Done channel is
|
||||
// closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
||||
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
|
||||
// The current deadline is already sooner than the new one.
|
||||
return WithCancel(parent)
|
||||
}
|
||||
c := &timerCtx{
|
||||
cancelCtx: newCancelCtx(parent),
|
||||
deadline: deadline,
|
||||
}
|
||||
propagateCancel(parent, c)
|
||||
d := deadline.Sub(time.Now())
|
||||
if d <= 0 {
|
||||
c.cancel(true, DeadlineExceeded) // deadline has already passed
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.err == nil {
|
||||
c.timer = time.AfterFunc(d, func() {
|
||||
c.cancel(true, DeadlineExceeded)
|
||||
})
|
||||
}
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
|
||||
// implement Done and Err. It implements cancel by stopping its timer then
|
||||
// delegating to cancelCtx.cancel.
|
||||
type timerCtx struct {
|
||||
*cancelCtx
|
||||
timer *time.Timer // Under cancelCtx.mu.
|
||||
|
||||
deadline time.Time
|
||||
}
|
||||
|
||||
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return c.deadline, true
|
||||
}
|
||||
|
||||
func (c *timerCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
|
||||
}
|
||||
|
||||
func (c *timerCtx) cancel(removeFromParent bool, err error) {
|
||||
c.cancelCtx.cancel(false, err)
|
||||
if removeFromParent {
|
||||
// Remove this timerCtx from its parent cancelCtx's children.
|
||||
removeChild(c.cancelCtx.Context, c)
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.timer != nil {
|
||||
c.timer.Stop()
|
||||
c.timer = nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return WithDeadline(parent, time.Now().Add(timeout))
|
||||
}
|
||||
|
||||
// WithValue returns a copy of parent in which the value associated with key is
|
||||
// val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
||||
return &valueCtx{parent, key, val}
|
||||
}
|
||||
|
||||
// A valueCtx carries a key-value pair. It implements Value for that key and
|
||||
// delegates all other calls to the embedded Context.
|
||||
type valueCtx struct {
|
||||
Context
|
||||
key, val interface{}
|
||||
}
|
||||
|
||||
func (c *valueCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
|
||||
}
|
||||
|
||||
func (c *valueCtx) Value(key interface{}) interface{} {
|
||||
if c.key == key {
|
||||
return c.val
|
||||
}
|
||||
return c.Context.Value(key)
|
||||
}
|
109
vendor/golang.org/x/net/context/pre_go19.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package context
|
||||
|
||||
import "time"
|
||||
|
||||
// A Context carries a deadline, a cancelation signal, and other values across
|
||||
// API boundaries.
|
||||
//
|
||||
// Context's methods may be called by multiple goroutines simultaneously.
|
||||
type Context interface {
|
||||
// Deadline returns the time when work done on behalf of this context
|
||||
// should be canceled. Deadline returns ok==false when no deadline is
|
||||
// set. Successive calls to Deadline return the same results.
|
||||
Deadline() (deadline time.Time, ok bool)
|
||||
|
||||
// Done returns a channel that's closed when work done on behalf of this
|
||||
// context should be canceled. Done may return nil if this context can
|
||||
// never be canceled. Successive calls to Done return the same value.
|
||||
//
|
||||
// WithCancel arranges for Done to be closed when cancel is called;
|
||||
// WithDeadline arranges for Done to be closed when the deadline
|
||||
// expires; WithTimeout arranges for Done to be closed when the timeout
|
||||
// elapses.
|
||||
//
|
||||
// Done is provided for use in select statements:
|
||||
//
|
||||
// // Stream generates values with DoSomething and sends them to out
|
||||
// // until DoSomething returns an error or ctx.Done is closed.
|
||||
// func Stream(ctx context.Context, out chan<- Value) error {
|
||||
// for {
|
||||
// v, err := DoSomething(ctx)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return ctx.Err()
|
||||
// case out <- v:
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// See http://blog.golang.org/pipelines for more examples of how to use
|
||||
// a Done channel for cancelation.
|
||||
Done() <-chan struct{}
|
||||
|
||||
// Err returns a non-nil error value after Done is closed. Err returns
|
||||
// Canceled if the context was canceled or DeadlineExceeded if the
|
||||
// context's deadline passed. No other values for Err are defined.
|
||||
// After Done is closed, successive calls to Err return the same value.
|
||||
Err() error
|
||||
|
||||
// Value returns the value associated with this context for key, or nil
|
||||
// if no value is associated with key. Successive calls to Value with
|
||||
// the same key return the same result.
|
||||
//
|
||||
// Use context values only for request-scoped data that transits
|
||||
// processes and API boundaries, not for passing optional parameters to
|
||||
// functions.
|
||||
//
|
||||
// A key identifies a specific value in a Context. Functions that wish
|
||||
// to store values in Context typically allocate a key in a global
|
||||
// variable then use that key as the argument to context.WithValue and
|
||||
// Context.Value. A key can be any type that supports equality;
|
||||
// packages should define keys as an unexported type to avoid
|
||||
// collisions.
|
||||
//
|
||||
// Packages that define a Context key should provide type-safe accessors
|
||||
// for the values stored using that key:
|
||||
//
|
||||
// // Package user defines a User type that's stored in Contexts.
|
||||
// package user
|
||||
//
|
||||
// import "golang.org/x/net/context"
|
||||
//
|
||||
// // User is the type of value stored in the Contexts.
|
||||
// type User struct {...}
|
||||
//
|
||||
// // key is an unexported type for keys defined in this package.
|
||||
// // This prevents collisions with keys defined in other packages.
|
||||
// type key int
|
||||
//
|
||||
// // userKey is the key for user.User values in Contexts. It is
|
||||
// // unexported; clients use user.NewContext and user.FromContext
|
||||
// // instead of using this key directly.
|
||||
// var userKey key = 0
|
||||
//
|
||||
// // NewContext returns a new Context that carries value u.
|
||||
// func NewContext(ctx context.Context, u *User) context.Context {
|
||||
// return context.WithValue(ctx, userKey, u)
|
||||
// }
|
||||
//
|
||||
// // FromContext returns the User value stored in ctx, if any.
|
||||
// func FromContext(ctx context.Context) (*User, bool) {
|
||||
// u, ok := ctx.Value(userKey).(*User)
|
||||
// return u, ok
|
||||
// }
|
||||
Value(key interface{}) interface{}
|
||||
}
|
||||
|
||||
// A CancelFunc tells an operation to abandon its work.
|
||||
// A CancelFunc does not wait for the work to stop.
|
||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
||||
type CancelFunc func()
|
50
vendor/golang.org/x/net/http/httpguts/guts.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package httpguts provides functions implementing various details
|
||||
// of the HTTP specification.
|
||||
//
|
||||
// This package is shared by the standard library (which vendors it)
|
||||
// and x/net/http2. It comes with no API stability promise.
|
||||
package httpguts
|
||||
|
||||
import (
|
||||
"net/textproto"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ValidTrailerHeader reports whether name is a valid header field name to appear
|
||||
// in trailers.
|
||||
// See RFC 7230, Section 4.1.2
|
||||
func ValidTrailerHeader(name string) bool {
|
||||
name = textproto.CanonicalMIMEHeaderKey(name)
|
||||
if strings.HasPrefix(name, "If-") || badTrailer[name] {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var badTrailer = map[string]bool{
|
||||
"Authorization": true,
|
||||
"Cache-Control": true,
|
||||
"Connection": true,
|
||||
"Content-Encoding": true,
|
||||
"Content-Length": true,
|
||||
"Content-Range": true,
|
||||
"Content-Type": true,
|
||||
"Expect": true,
|
||||
"Host": true,
|
||||
"Keep-Alive": true,
|
||||
"Max-Forwards": true,
|
||||
"Pragma": true,
|
||||
"Proxy-Authenticate": true,
|
||||
"Proxy-Authorization": true,
|
||||
"Proxy-Connection": true,
|
||||
"Range": true,
|
||||
"Realm": true,
|
||||
"Te": true,
|
||||
"Trailer": true,
|
||||
"Transfer-Encoding": true,
|
||||
"Www-Authenticate": true,
|
||||
}
|
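A few illustrative calls against ValidTrailerHeader above (a sketch, not part of the vendored file):

	fmt.Println(httpguts.ValidTrailerHeader("X-Checksum"))     // true: not in badTrailer, no "If-" prefix
	fmt.Println(httpguts.ValidTrailerHeader("Content-Length")) // false: listed in badTrailer
	fmt.Println(httpguts.ValidTrailerHeader("If-Match"))       // false: canonical name starts with "If-"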
346
vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
Normal file
@ -0,0 +1,346 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httpguts
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/idna"
|
||||
)
|
||||
|
||||
var isTokenTable = [127]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
func IsTokenRune(r rune) bool {
|
||||
i := int(r)
|
||||
return i < len(isTokenTable) && isTokenTable[i]
|
||||
}
|
||||
|
||||
func isNotToken(r rune) bool {
|
||||
return !IsTokenRune(r)
|
||||
}
|
||||
|
||||
// HeaderValuesContainsToken reports whether any string in values
|
||||
// contains the provided token, ASCII case-insensitively.
|
||||
func HeaderValuesContainsToken(values []string, token string) bool {
|
||||
for _, v := range values {
|
||||
if headerValueContainsToken(v, token) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isOWS reports whether b is an optional whitespace byte, as defined
|
||||
// by RFC 7230 section 3.2.3.
|
||||
func isOWS(b byte) bool { return b == ' ' || b == '\t' }
|
||||
|
||||
// trimOWS returns x with all optional whitespace removed from the
|
||||
// beginning and end.
|
||||
func trimOWS(x string) string {
|
||||
// TODO: consider using strings.Trim(x, " \t") instead,
|
||||
// if and when it's fast enough. See issue 10292.
|
||||
// But this ASCII-only code will probably always beat UTF-8
|
||||
// aware code.
|
||||
for len(x) > 0 && isOWS(x[0]) {
|
||||
x = x[1:]
|
||||
}
|
||||
for len(x) > 0 && isOWS(x[len(x)-1]) {
|
||||
x = x[:len(x)-1]
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// headerValueContainsToken reports whether v (assumed to be a
|
||||
// 0#element, in the ABNF extension described in RFC 7230 section 7)
|
||||
// contains token amongst its comma-separated tokens, ASCII
|
||||
// case-insensitively.
|
||||
func headerValueContainsToken(v string, token string) bool {
|
||||
v = trimOWS(v)
|
||||
if comma := strings.IndexByte(v, ','); comma != -1 {
|
||||
return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
|
||||
}
|
||||
return tokenEqual(v, token)
|
||||
}
|
||||
|
||||
// lowerASCII returns the ASCII lowercase version of b.
|
||||
func lowerASCII(b byte) byte {
|
||||
if 'A' <= b && b <= 'Z' {
|
||||
return b + ('a' - 'A')
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
|
||||
func tokenEqual(t1, t2 string) bool {
|
||||
if len(t1) != len(t2) {
|
||||
return false
|
||||
}
|
||||
for i, b := range t1 {
|
||||
if b >= utf8.RuneSelf {
|
||||
// No UTF-8 or non-ASCII allowed in tokens.
|
||||
return false
|
||||
}
|
||||
if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isLWS reports whether b is linear white space, according
|
||||
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
func isLWS(b byte) bool { return b == ' ' || b == '\t' }
|
||||
|
||||
// isCTL reports whether b is a control byte, according
|
||||
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||
// CTL = <any US-ASCII control character
|
||||
// (octets 0 - 31) and DEL (127)>
|
||||
func isCTL(b byte) bool {
|
||||
const del = 0x7f // a CTL
|
||||
return b < ' ' || b == del
|
||||
}
|
||||
|
||||
// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
|
||||
// HTTP/2 imposes the additional restriction that uppercase ASCII
|
||||
// letters are not allowed.
|
||||
//
|
||||
// RFC 7230 says:
|
||||
// header-field = field-name ":" OWS field-value OWS
|
||||
// field-name = token
|
||||
// token = 1*tchar
|
||||
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
||||
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
||||
func ValidHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
if !IsTokenRune(r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ValidHostHeader reports whether h is a valid host header.
|
||||
func ValidHostHeader(h string) bool {
|
||||
// The latest spec is actually this:
|
||||
//
|
||||
// http://tools.ietf.org/html/rfc7230#section-5.4
|
||||
// Host = uri-host [ ":" port ]
|
||||
//
|
||||
// Where uri-host is:
|
||||
// http://tools.ietf.org/html/rfc3986#section-3.2.2
|
||||
//
|
||||
// But we're going to be much more lenient for now and just
|
||||
// search for any byte that's not a valid byte in any of those
|
||||
// expressions.
|
||||
for i := 0; i < len(h); i++ {
|
||||
if !validHostByte[h[i]] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// See the ValidHostHeader comment.
|
||||
var validHostByte = [256]bool{
|
||||
'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
|
||||
'8': true, '9': true,
|
||||
|
||||
'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
|
||||
'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
|
||||
'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
|
||||
'y': true, 'z': true,
|
||||
|
||||
'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
|
||||
'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
|
||||
'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
|
||||
'Y': true, 'Z': true,
|
||||
|
||||
'!': true, // sub-delims
|
||||
'$': true, // sub-delims
|
||||
'%': true, // pct-encoded (and used in IPv6 zones)
|
||||
'&': true, // sub-delims
|
||||
'(': true, // sub-delims
|
||||
')': true, // sub-delims
|
||||
'*': true, // sub-delims
|
||||
'+': true, // sub-delims
|
||||
',': true, // sub-delims
|
||||
'-': true, // unreserved
|
||||
'.': true, // unreserved
|
||||
':': true, // IPv6address + Host expression's optional port
|
||||
';': true, // sub-delims
|
||||
'=': true, // sub-delims
|
||||
'[': true,
|
||||
'\'': true, // sub-delims
|
||||
']': true,
|
||||
'_': true, // unreserved
|
||||
'~': true, // unreserved
|
||||
}
|
||||
|
||||
// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
|
||||
//
|
||||
// message-header = field-name ":" [ field-value ]
|
||||
// field-value = *( field-content | LWS )
|
||||
// field-content = <the OCTETs making up the field-value
|
||||
// and consisting of either *TEXT or combinations
|
||||
// of token, separators, and quoted-string>
|
||||
//
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
|
||||
//
|
||||
// TEXT = <any OCTET except CTLs,
|
||||
// but including LWS>
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// CTL = <any US-ASCII control character
|
||||
// (octets 0 - 31) and DEL (127)>
|
||||
//
|
||||
// RFC 7230 says:
|
||||
// field-value = *( field-content / obs-fold )
|
||||
// obs-fold = N/A to http2, and deprecated
|
||||
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
|
||||
// field-vchar = VCHAR / obs-text
|
||||
// obs-text = %x80-FF
|
||||
// VCHAR = "any visible [USASCII] character"
|
||||
//
|
||||
// http2 further says: "Similarly, HTTP/2 allows header field values
|
||||
// that are not valid. While most of the values that can be encoded
|
||||
// will not alter header field parsing, carriage return (CR, ASCII
|
||||
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
|
||||
// 0x0) might be exploited by an attacker if they are translated
|
||||
// verbatim. Any request or response that contains a character not
|
||||
// permitted in a header field value MUST be treated as malformed
|
||||
// (Section 8.1.2.6). Valid characters are defined by the
|
||||
// field-content ABNF rule in Section 3.2 of [RFC7230]."
|
||||
//
|
||||
// This function does not (yet?) properly handle the rejection of
|
||||
// strings that begin or end with SP or HTAB.
|
||||
func ValidHeaderFieldValue(v string) bool {
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if isCTL(b) && !isLWS(b) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// PunycodeHostPort returns the IDNA Punycode version
|
||||
// of the provided "host" or "host:port" string.
|
||||
func PunycodeHostPort(v string) (string, error) {
|
||||
if isASCII(v) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(v)
|
||||
if err != nil {
|
||||
// The input 'v' argument was just a "host" argument,
|
||||
// without a port. This error should not be returned
|
||||
// to the caller.
|
||||
host = v
|
||||
port = ""
|
||||
}
|
||||
host, err = idna.ToASCII(host)
|
||||
if err != nil {
|
||||
// Non-UTF-8? Not representable in Punycode, in any
|
||||
// case.
|
||||
return "", err
|
||||
}
|
||||
if port == "" {
|
||||
return host, nil
|
||||
}
|
||||
return net.JoinHostPort(host, port), nil
|
||||
}
|
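For example, PunycodeHostPort leaves pure-ASCII input untouched and converts IDNA hosts while preserving any port. A sketch (the converted form assumes the usual IDNA mapping; not part of the vendored file):

	fmt.Println(httpguts.PunycodeHostPort("example.com:8080"))   // "example.com:8080" <nil>: already ASCII
	fmt.Println(httpguts.PunycodeHostPort("bücher.example:443")) // "xn--bcher-kva.example:443" <nil>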
2
vendor/golang.org/x/net/http2/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
*~
h2i/h2i
51
vendor/golang.org/x/net/http2/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
#
|
||||
# This Dockerfile builds a recent curl with HTTP/2 client support, using
|
||||
# a recent nghttp2 build.
|
||||
#
|
||||
# See the Makefile for how to tag it. If Docker and that image is found, the
|
||||
# Go tests use this curl binary for integration tests.
|
||||
#
|
||||
|
||||
FROM ubuntu:trusty
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get upgrade -y && \
|
||||
apt-get install -y git-core build-essential wget
|
||||
|
||||
RUN apt-get install -y --no-install-recommends \
|
||||
autotools-dev libtool pkg-config zlib1g-dev \
|
||||
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
|
||||
automake autoconf
|
||||
|
||||
# The list of packages nghttp2 recommends for h2load:
|
||||
RUN apt-get install -y --no-install-recommends make binutils \
|
||||
autoconf automake autotools-dev \
|
||||
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
|
||||
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
|
||||
cython python3.4-dev python-setuptools
|
||||
|
||||
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
|
||||
ENV NGHTTP2_VER 895da9a
|
||||
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
|
||||
|
||||
WORKDIR /root/nghttp2
|
||||
RUN git reset --hard $NGHTTP2_VER
|
||||
RUN autoreconf -i
|
||||
RUN automake
|
||||
RUN autoconf
|
||||
RUN ./configure
|
||||
RUN make
|
||||
RUN make install
|
||||
|
||||
WORKDIR /root
|
||||
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
|
||||
RUN tar -zxvf curl-7.45.0.tar.gz
|
||||
WORKDIR /root/curl-7.45.0
|
||||
RUN ./configure --with-ssl --with-nghttp2=/usr/local
|
||||
RUN make
|
||||
RUN make install
|
||||
RUN ldconfig
|
||||
|
||||
CMD ["-h"]
|
||||
ENTRYPOINT ["/usr/local/bin/curl"]
|
||||
|
3
vendor/golang.org/x/net/http2/Makefile
generated
vendored
Normal file
@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .
20
vendor/golang.org/x/net/http2/README
generated
vendored
Normal file
@ -0,0 +1,20 @@
This is a work-in-progress HTTP/2 implementation for Go.

It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.

Status:

* The server support is pretty good. A few things are missing
  but are being worked on.
* The client work has just started but, since it shares a lot of code
  with the server, is coming along much quicker.

Docs are at https://godoc.org/golang.org/x/net/http2

Demo test server at https://http2.golang.org/

Help & bug reports welcome!

Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+
641
vendor/golang.org/x/net/http2/ciphers.go
generated
vendored
Normal file
@ -0,0 +1,641 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
// A list of the possible cipher suite ids. Taken from
|
||||
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
|
||||
|
||||
const (
|
||||
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
|
||||
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
|
||||
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
|
||||
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
|
||||
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
|
||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
|
||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
|
||||
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
|
||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
|
||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
|
||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
|
||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
|
||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
|
||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
|
||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
|
||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
|
||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
|
||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
|
||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
|
||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
|
||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
|
||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
|
||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
|
||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
|
||||
// Reserved uint16 = 0x001C-1D
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
|
||||
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
|
||||
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
|
||||
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
|
||||
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
|
||||
// Reserved uint16 = 0x0047-4F
|
||||
// Reserved uint16 = 0x0050-58
|
||||
// Reserved uint16 = 0x0059-5C
|
||||
// Unassigned uint16 = 0x005D-5F
|
||||
// Reserved uint16 = 0x0060-66
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
|
||||
// Unassigned uint16 = 0x006E-83
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
|
||||
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
|
||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
|
||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
|
||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
|
||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
|
||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
|
||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
|
||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
|
||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
|
||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
|
||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
|
||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
|
||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
|
||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
|
||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
|
||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
|
||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
|
||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
|
||||
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
|
||||
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
|
||||
// Unassigned uint16 = 0x00C6-FE
|
||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
|
||||
// Unassigned uint16 = 0x01-55,*
|
||||
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
|
||||
// Unassigned uint16 = 0x5601 - 0xC000
|
||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
|
||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
|
||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
|
||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
|
||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
|
||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
|
||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
|
||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
|
||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
|
||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
|
||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
|
||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
|
||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
|
||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
|
||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
|
||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
|
||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
|
||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
|
||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
|
||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
|
||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
|
||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
|
||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
|
||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
|
||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
|
||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
|
||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
|
||||
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
|
||||
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
|
||||
// Unassigned uint16 = 0xC0B0-FF
|
||||
// Unassigned uint16 = 0xC1-CB,*
|
||||
// Unassigned uint16 = 0xCC00-A7
|
||||
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
|
||||
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
|
||||
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
|
||||
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
|
||||
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
|
||||
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
|
||||
)
|
||||
|
||||
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
|
||||
// References:
|
||||
// https://tools.ietf.org/html/rfc7540#appendix-A
|
||||
// Reject cipher suites from Appendix A.
|
||||
// "This list includes those cipher suites that do not
|
||||
// offer an ephemeral key exchange and those that are
|
||||
// based on the TLS null, stream or block cipher type"
|
||||
func isBadCipher(cipher uint16) bool {
|
||||
switch cipher {
|
||||
case cipher_TLS_NULL_WITH_NULL_NULL,
|
||||
cipher_TLS_RSA_WITH_NULL_MD5,
|
||||
cipher_TLS_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_RSA_WITH_RC4_128_MD5,
|
||||
cipher_TLS_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
|
||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_RC4_128_SHA,
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
|
||||
cipher_TLS_KRB5_WITH_RC4_128_MD5,
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_NULL_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM,
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM,
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM_8,
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM_8,
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM,
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM,
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM_8,
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM_8:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
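The same RFC 7540 Appendix A rule can be applied to an application's own tls.Config before serving HTTP/2. The sketch below is illustrative only: it uses the exported crypto/tls constants rather than the unexported table above, and checks a deliberately small allow-list rather than a complete policy.

package main

import (
	"crypto/tls"
	"fmt"
)

// approvedForHTTP2 is an illustrative allow-list: suites with an ephemeral
// key exchange and an AEAD cipher, roughly the inverse of the Appendix A
// blacklist enforced by isBadCipher. It is a small subset, not a full policy.
func approvedForHTTP2(id uint16) bool {
	switch id {
	case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:
		return true
	}
	return false
}

func main() {
	candidates := []uint16{
		tls.TLS_RSA_WITH_AES_128_CBC_SHA, // blacklisted by RFC 7540 Appendix A
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	}
	for _, id := range candidates {
		fmt.Printf("0x%04x approved=%v\n", id, approvedForHTTP2(id))
	}
}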
278
vendor/golang.org/x/net/http2/client_conn_pool.go
generated
vendored
Normal file
@ -0,0 +1,278 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Transport code's client connection pooling.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ClientConnPool manages a pool of HTTP/2 client connections.
|
||||
type ClientConnPool interface {
|
||||
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
|
||||
MarkDead(*ClientConn)
|
||||
}
|
||||
|
||||
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
|
||||
// implementations which can close their idle connections.
|
||||
type clientConnPoolIdleCloser interface {
|
||||
ClientConnPool
|
||||
closeIdleConnections()
|
||||
}
|
||||
|
||||
var (
|
||||
_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
|
||||
_ clientConnPoolIdleCloser = noDialClientConnPool{}
|
||||
)
|
||||
|
||||
// TODO: use singleflight for dialing and addConnCalls?
|
||||
type clientConnPool struct {
|
||||
t *Transport
|
||||
|
||||
mu sync.Mutex // TODO: maybe switch to RWMutex
|
||||
// TODO: add support for sharing conns based on cert names
|
||||
// (e.g. share conn for googleapis.com and appspot.com)
|
||||
conns map[string][]*ClientConn // key is host:port
|
||||
dialing map[string]*dialCall // currently in-flight dials
|
||||
keys map[*ClientConn][]string
|
||||
addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
|
||||
}
|
||||
|
||||
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, dialOnMiss)
|
||||
}
|
||||
|
||||
const (
|
||||
dialOnMiss = true
|
||||
noDialOnMiss = false
|
||||
)
|
||||
|
||||
// shouldTraceGetConn reports whether getClientConn should call any
|
||||
// ClientTrace.GetConn hook associated with the http.Request.
|
||||
//
|
||||
// This complexity is needed to avoid double calls of the GetConn hook
|
||||
// during the back-and-forth between net/http and x/net/http2 (when the
|
||||
// net/http.Transport is upgraded to also speak http2), as well as support
|
||||
// the case where x/net/http2 is being used directly.
|
||||
func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
|
||||
// If our Transport wasn't made via ConfigureTransport, always
|
||||
// trace the GetConn hook if provided, because that means the
|
||||
// http2 package is being used directly and it's the one
|
||||
// dialing, as opposed to net/http.
|
||||
if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
|
||||
return true
|
||||
}
|
||||
// Otherwise, only use the GetConn hook if this connection has
|
||||
// been used previously for other requests. For fresh
|
||||
// connections, the net/http package does the dialing.
|
||||
return !st.freshConn
|
||||
}
|
||||
|
||||
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
|
||||
if isConnectionCloseRequest(req) && dialOnMiss {
|
||||
// It gets its own connection.
|
||||
traceGetConn(req, addr)
|
||||
const singleUse = true
|
||||
cc, err := p.t.dialClientConn(addr, singleUse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cc, nil
|
||||
}
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[addr] {
|
||||
if st := cc.idleState(); st.canTakeNewRequest {
|
||||
if p.shouldTraceGetConn(st) {
|
||||
traceGetConn(req, addr)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
return cc, nil
|
||||
}
|
||||
}
|
||||
if !dialOnMiss {
|
||||
p.mu.Unlock()
|
||||
return nil, ErrNoCachedConn
|
||||
}
|
||||
traceGetConn(req, addr)
|
||||
call := p.getStartDialLocked(addr)
|
||||
p.mu.Unlock()
|
||||
<-call.done
|
||||
return call.res, call.err
|
||||
}
|
||||
|
||||
// dialCall is an in-flight Transport dial call to a host.
|
||||
type dialCall struct {
|
||||
_ incomparable
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
res *ClientConn // valid after done is closed
|
||||
err error // valid after done is closed
|
||||
}
|
||||
|
||||
// requires p.mu is held.
|
||||
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
|
||||
if call, ok := p.dialing[addr]; ok {
|
||||
// A dial is already in-flight. Don't start another.
|
||||
return call
|
||||
}
|
||||
call := &dialCall{p: p, done: make(chan struct{})}
|
||||
if p.dialing == nil {
|
||||
p.dialing = make(map[string]*dialCall)
|
||||
}
|
||||
p.dialing[addr] = call
|
||||
go call.dial(addr)
|
||||
return call
|
||||
}
|
||||
|
||||
// run in its own goroutine.
|
||||
func (c *dialCall) dial(addr string) {
|
||||
const singleUse = false // shared conn
|
||||
c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
|
||||
close(c.done)
|
||||
|
||||
c.p.mu.Lock()
|
||||
delete(c.p.dialing, addr)
|
||||
if c.err == nil {
|
||||
c.p.addConnLocked(addr, c.res)
|
||||
}
|
||||
c.p.mu.Unlock()
|
||||
}
|
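The dialCall/getStartDialLocked machinery above is a hand-rolled duplicate-suppression scheme (the TODO even points at singleflight). A standalone sketch of the same pattern, with hypothetical names and an expensive dial-like function standing in for dialClientConn:

package main

import (
	"fmt"
	"sync"
	"time"
)

// call mirrors dialCall: one in-flight operation whose result is shared by
// every caller that arrives while it is still running.
type call struct {
	done chan struct{} // closed when the operation finishes
	res  string        // valid after done is closed
}

type group struct {
	mu    sync.Mutex
	calls map[string]*call // in-flight operations, keyed like p.dialing
}

// do runs fn at most once per key at a time; concurrent callers for the same
// key block until the first call finishes and then share its result.
func (g *group) do(key string, fn func() string) string {
	g.mu.Lock()
	if c, ok := g.calls[key]; ok {
		g.mu.Unlock()
		<-c.done // someone else is already dialing; wait for them
		return c.res
	}
	c := &call{done: make(chan struct{})}
	if g.calls == nil {
		g.calls = make(map[string]*call)
	}
	g.calls[key] = c
	g.mu.Unlock()

	c.res = fn()

	g.mu.Lock()
	delete(g.calls, key)
	g.mu.Unlock()
	close(c.done)
	return c.res
}

func main() {
	var g group
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			res := g.do("example.com:443", func() string {
				time.Sleep(10 * time.Millisecond) // stand-in for an expensive dial
				return "conn to example.com:443"
			})
			fmt.Println(res)
		}()
	}
	wg.Wait()
}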
||||
|
||||
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
|
||||
// already exist. It coalesces concurrent calls with the same key.
|
||||
// This is used by the http1 Transport code when it creates a new connection. Because
|
||||
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
|
||||
// the protocol), it can get into a situation where it has multiple TLS connections.
|
||||
// This code decides which ones live or die.
|
||||
// The return value used reports whether c was used.
|
||||
// c is never closed.
|
||||
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[key] {
|
||||
if cc.CanTakeNewRequest() {
|
||||
p.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
call, dup := p.addConnCalls[key]
|
||||
if !dup {
|
||||
if p.addConnCalls == nil {
|
||||
p.addConnCalls = make(map[string]*addConnCall)
|
||||
}
|
||||
call = &addConnCall{
|
||||
p: p,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
p.addConnCalls[key] = call
|
||||
go call.run(t, key, c)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
|
||||
<-call.done
|
||||
if call.err != nil {
|
||||
return false, call.err
|
||||
}
|
||||
return !dup, nil
|
||||
}
|
||||
|
||||
type addConnCall struct {
|
||||
_ incomparable
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
|
||||
cc, err := t.NewClientConn(tc)
|
||||
|
||||
p := c.p
|
||||
p.mu.Lock()
|
||||
if err != nil {
|
||||
c.err = err
|
||||
} else {
|
||||
p.addConnLocked(key, cc)
|
||||
}
|
||||
delete(p.addConnCalls, key)
|
||||
p.mu.Unlock()
|
||||
close(c.done)
|
||||
}
|
||||
|
||||
// p.mu must be held
|
||||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
|
||||
for _, v := range p.conns[key] {
|
||||
if v == cc {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.conns == nil {
|
||||
p.conns = make(map[string][]*ClientConn)
|
||||
}
|
||||
if p.keys == nil {
|
||||
p.keys = make(map[*ClientConn][]string)
|
||||
}
|
||||
p.conns[key] = append(p.conns[key], cc)
|
||||
p.keys[cc] = append(p.keys[cc], key)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) MarkDead(cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
for _, key := range p.keys[cc] {
|
||||
vv, ok := p.conns[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
newList := filterOutClientConn(vv, cc)
|
||||
if len(newList) > 0 {
|
||||
p.conns[key] = newList
|
||||
} else {
|
||||
delete(p.conns, key)
|
||||
}
|
||||
}
|
||||
delete(p.keys, cc)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) closeIdleConnections() {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
// TODO: don't close a cc if it was just added to the pool
|
||||
// milliseconds ago and has never been used. There's currently
|
||||
// a small race window with the HTTP/1 Transport's integration
|
||||
// where it can add an idle conn just before using it, and
|
||||
// somebody else can concurrently call CloseIdleConns and
|
||||
// break some caller's RoundTrip.
|
||||
for _, vv := range p.conns {
|
||||
for _, cc := range vv {
|
||||
cc.closeIfIdle()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
|
||||
out := in[:0]
|
||||
for _, v := range in {
|
||||
if v != exclude {
|
||||
out = append(out, v)
|
||||
}
|
||||
}
|
||||
// If we filtered it out, zero out the last item to prevent
|
||||
// the GC from seeing it.
|
||||
if len(in) != len(out) {
|
||||
in[len(in)-1] = nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// noDialClientConnPool is an implementation of http2.ClientConnPool
|
||||
// which never dials. We let the HTTP/1.1 client dial and use its TLS
|
||||
// connection instead.
|
||||
type noDialClientConnPool struct{ *clientConnPool }
|
||||
|
||||
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, noDialOnMiss)
|
||||
}
|
146
vendor/golang.org/x/net/http2/databuffer.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Buffer chunks are allocated from a pool to reduce pressure on GC.
|
||||
// The maximum wasted space per dataBuffer is 2x the largest size class,
|
||||
// which happens when the dataBuffer has multiple chunks and there is
|
||||
// one unread byte in both the first and last chunks. We use a few size
|
||||
// classes to minimize overheads for servers that typically receive very
|
||||
// small request bodies.
|
||||
//
|
||||
// TODO: Benchmark to determine if the pools are necessary. The GC may have
|
||||
// improved enough that we can instead allocate chunks like this:
|
||||
// make([]byte, max(16<<10, expectedBytesRemaining))
|
||||
var (
|
||||
dataChunkSizeClasses = []int{
|
||||
1 << 10,
|
||||
2 << 10,
|
||||
4 << 10,
|
||||
8 << 10,
|
||||
16 << 10,
|
||||
}
|
||||
dataChunkPools = [...]sync.Pool{
|
||||
{New: func() interface{} { return make([]byte, 1<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 2<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 4<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 8<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 16<<10) }},
|
||||
}
|
||||
)
|
||||
|
||||
func getDataBufferChunk(size int64) []byte {
|
||||
i := 0
|
||||
for ; i < len(dataChunkSizeClasses)-1; i++ {
|
||||
if size <= int64(dataChunkSizeClasses[i]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return dataChunkPools[i].Get().([]byte)
|
||||
}
|
||||
|
||||
func putDataBufferChunk(p []byte) {
|
||||
for i, n := range dataChunkSizeClasses {
|
||||
if len(p) == n {
|
||||
dataChunkPools[i].Put(p)
|
||||
return
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
|
||||
}
|
||||
|
||||
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
|
||||
// Each dataBuffer is used to read DATA frames on a single stream.
|
||||
// The buffer is divided into chunks so the server can limit the
|
||||
// total memory used by a single connection without limiting the
|
||||
// request body size on any single stream.
|
||||
type dataBuffer struct {
|
||||
chunks [][]byte
|
||||
r int // next byte to read is chunks[0][r]
|
||||
w int // next byte to write is chunks[len(chunks)-1][w]
|
||||
size int // total buffered bytes
|
||||
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
|
||||
}
|
||||
|
||||
var errReadEmpty = errors.New("read from empty dataBuffer")
|
||||
|
||||
// Read copies bytes from the buffer into p.
|
||||
// It is an error to read when no data is available.
|
||||
func (b *dataBuffer) Read(p []byte) (int, error) {
|
||||
if b.size == 0 {
|
||||
return 0, errReadEmpty
|
||||
}
|
||||
var ntotal int
|
||||
for len(p) > 0 && b.size > 0 {
|
||||
readFrom := b.bytesFromFirstChunk()
|
||||
n := copy(p, readFrom)
|
||||
p = p[n:]
|
||||
ntotal += n
|
||||
b.r += n
|
||||
b.size -= n
|
||||
// If the first chunk has been consumed, advance to the next chunk.
|
||||
if b.r == len(b.chunks[0]) {
|
||||
putDataBufferChunk(b.chunks[0])
|
||||
end := len(b.chunks) - 1
|
||||
copy(b.chunks[:end], b.chunks[1:])
|
||||
b.chunks[end] = nil
|
||||
b.chunks = b.chunks[:end]
|
||||
b.r = 0
|
||||
}
|
||||
}
|
||||
return ntotal, nil
|
||||
}
|
||||
|
||||
func (b *dataBuffer) bytesFromFirstChunk() []byte {
|
||||
if len(b.chunks) == 1 {
|
||||
return b.chunks[0][b.r:b.w]
|
||||
}
|
||||
return b.chunks[0][b.r:]
|
||||
}
|
||||
|
||||
// Len returns the number of bytes of the unread portion of the buffer.
|
||||
func (b *dataBuffer) Len() int {
|
||||
return b.size
|
||||
}
|
||||
|
||||
// Write appends p to the buffer.
|
||||
func (b *dataBuffer) Write(p []byte) (int, error) {
|
||||
ntotal := len(p)
|
||||
for len(p) > 0 {
|
||||
// If the last chunk has no free space, allocate a new chunk. Try to allocate
|
||||
// enough to fully copy p plus any additional bytes we expect to
|
||||
// receive. However, this may allocate less than len(p).
|
||||
want := int64(len(p))
|
||||
if b.expected > want {
|
||||
want = b.expected
|
||||
}
|
||||
chunk := b.lastChunkOrAlloc(want)
|
||||
n := copy(chunk[b.w:], p)
|
||||
p = p[n:]
|
||||
b.w += n
|
||||
b.size += n
|
||||
b.expected -= int64(n)
|
||||
}
|
||||
return ntotal, nil
|
||||
}
|
||||
|
||||
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
|
||||
if len(b.chunks) != 0 {
|
||||
last := b.chunks[len(b.chunks)-1]
|
||||
if b.w < len(last) {
|
||||
return last
|
||||
}
|
||||
}
|
||||
chunk := getDataBufferChunk(want)
|
||||
b.chunks = append(b.chunks, chunk)
|
||||
b.w = 0
|
||||
return chunk
|
||||
}
|
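The size-class pooling used by dataBuffer can be exercised outside the package; the following standalone sketch re-creates getDataBufferChunk's class selection so the behavior is easy to see (an illustrative copy, not the vendored code itself):

package main

import (
	"fmt"
	"sync"
)

// The same size classes as dataChunkSizeClasses above, re-declared here so
// the selection logic can be run outside the http2 package.
var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

var pools = func() []sync.Pool {
	ps := make([]sync.Pool, len(sizeClasses))
	for i, n := range sizeClasses {
		n := n // capture the per-class size for the closure
		ps[i].New = func() interface{} { return make([]byte, n) }
	}
	return ps
}()

// getChunk mirrors getDataBufferChunk: pick the smallest class that fits,
// falling back to the largest (16 KiB) class for bigger requests.
func getChunk(size int64) []byte {
	i := 0
	for ; i < len(sizeClasses)-1; i++ {
		if size <= int64(sizeClasses[i]) {
			break
		}
	}
	return pools[i].Get().([]byte)
}

func main() {
	for _, want := range []int64{100, 3000, 64 << 10} {
		c := getChunk(want)
		fmt.Printf("want %6d bytes -> chunk of %6d bytes\n", want, len(c))
	}
}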
133
vendor/golang.org/x/net/http2/errors.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
|
||||
type ErrCode uint32
|
||||
|
||||
const (
|
||||
ErrCodeNo ErrCode = 0x0
|
||||
ErrCodeProtocol ErrCode = 0x1
|
||||
ErrCodeInternal ErrCode = 0x2
|
||||
ErrCodeFlowControl ErrCode = 0x3
|
||||
ErrCodeSettingsTimeout ErrCode = 0x4
|
||||
ErrCodeStreamClosed ErrCode = 0x5
|
||||
ErrCodeFrameSize ErrCode = 0x6
|
||||
ErrCodeRefusedStream ErrCode = 0x7
|
||||
ErrCodeCancel ErrCode = 0x8
|
||||
ErrCodeCompression ErrCode = 0x9
|
||||
ErrCodeConnect ErrCode = 0xa
|
||||
ErrCodeEnhanceYourCalm ErrCode = 0xb
|
||||
ErrCodeInadequateSecurity ErrCode = 0xc
|
||||
ErrCodeHTTP11Required ErrCode = 0xd
|
||||
)
|
||||
|
||||
var errCodeName = map[ErrCode]string{
|
||||
ErrCodeNo: "NO_ERROR",
|
||||
ErrCodeProtocol: "PROTOCOL_ERROR",
|
||||
ErrCodeInternal: "INTERNAL_ERROR",
|
||||
ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
|
||||
ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
|
||||
ErrCodeStreamClosed: "STREAM_CLOSED",
|
||||
ErrCodeFrameSize: "FRAME_SIZE_ERROR",
|
||||
ErrCodeRefusedStream: "REFUSED_STREAM",
|
||||
ErrCodeCancel: "CANCEL",
|
||||
ErrCodeCompression: "COMPRESSION_ERROR",
|
||||
ErrCodeConnect: "CONNECT_ERROR",
|
||||
ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
|
||||
ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
|
||||
ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
|
||||
}
|
||||
|
||||
func (e ErrCode) String() string {
|
||||
if s, ok := errCodeName[e]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
|
||||
}
|
||||
|
||||
// ConnectionError is an error that results in the termination of the
|
||||
// entire connection.
|
||||
type ConnectionError ErrCode
|
||||
|
||||
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
|
||||
|
||||
// StreamError is an error that only affects one stream within an
|
||||
// HTTP/2 connection.
|
||||
type StreamError struct {
|
||||
StreamID uint32
|
||||
Code ErrCode
|
||||
Cause error // optional additional detail
|
||||
}
|
||||
|
||||
func streamError(id uint32, code ErrCode) StreamError {
|
||||
return StreamError{StreamID: id, Code: code}
|
||||
}
|
||||
|
||||
func (e StreamError) Error() string {
|
||||
if e.Cause != nil {
|
||||
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
|
||||
}
|
||||
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
|
||||
}
|
||||
|
||||
// 6.9.1 The Flow Control Window
|
||||
// "If a sender receives a WINDOW_UPDATE that causes a flow control
|
||||
// window to exceed this maximum it MUST terminate either the stream
|
||||
// or the connection, as appropriate. For streams, [...]; for the
|
||||
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
|
||||
type goAwayFlowError struct{}
|
||||
|
||||
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
|
||||
|
||||
// connError represents an HTTP/2 ConnectionError error code, along
|
||||
// with a string (for debugging) explaining why.
|
||||
//
|
||||
// Errors of this type are only returned by the frame parser functions
|
||||
// and converted into ConnectionError(Code), after stashing away
|
||||
// the Reason into the Framer's errDetail field, accessible via
|
||||
// the (*Framer).ErrorDetail method.
|
||||
type connError struct {
|
||||
Code ErrCode // the ConnectionError error code
|
||||
Reason string // additional reason
|
||||
}
|
||||
|
||||
func (e connError) Error() string {
|
||||
return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
|
||||
}
|
||||
|
||||
type pseudoHeaderError string
|
||||
|
||||
func (e pseudoHeaderError) Error() string {
|
||||
return fmt.Sprintf("invalid pseudo-header %q", string(e))
|
||||
}
|
||||
|
||||
type duplicatePseudoHeaderError string
|
||||
|
||||
func (e duplicatePseudoHeaderError) Error() string {
|
||||
return fmt.Sprintf("duplicate pseudo-header %q", string(e))
|
||||
}
|
||||
|
||||
type headerFieldNameError string
|
||||
|
||||
func (e headerFieldNameError) Error() string {
|
||||
return fmt.Sprintf("invalid header field name %q", string(e))
|
||||
}
|
||||
|
||||
type headerFieldValueError string
|
||||
|
||||
func (e headerFieldValueError) Error() string {
|
||||
return fmt.Sprintf("invalid header field value %q", string(e))
|
||||
}
|
||||
|
||||
var (
|
||||
errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
|
||||
errPseudoAfterRegular = errors.New("pseudo header field after regular")
|
||||
)
|
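ErrCode, StreamError, and ConnectionError are exported, so callers can construct and format them directly. A small usage example, assuming the package is imported as golang.org/x/net/http2:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	// ErrCode values print their RFC 7540 names via the String method above.
	fmt.Println(http2.ErrCodeFlowControl) // FLOW_CONTROL_ERROR

	// Stream-level versus connection-level errors, as callers see them.
	serr := http2.StreamError{StreamID: 1, Code: http2.ErrCodeProtocol}
	cerr := http2.ConnectionError(http2.ErrCodeEnhanceYourCalm)
	fmt.Println(serr.Error())
	fmt.Println(cerr.Error())
}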
52
vendor/golang.org/x/net/http2/flow.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Flow control
|
||||
|
||||
package http2
|
||||
|
||||
// flow is the flow control window's size.
|
||||
type flow struct {
|
||||
_ incomparable
|
||||
|
||||
// n is the number of DATA bytes we're allowed to send.
|
||||
// A flow is kept both on a conn and a per-stream.
|
||||
n int32
|
||||
|
||||
// conn points to the shared connection-level flow that is
|
||||
// shared by all streams on that conn. It is nil for the flow
|
||||
// that's on the conn directly.
|
||||
conn *flow
|
||||
}
|
||||
|
||||
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
|
||||
|
||||
func (f *flow) available() int32 {
|
||||
n := f.n
|
||||
if f.conn != nil && f.conn.n < n {
|
||||
n = f.conn.n
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (f *flow) take(n int32) {
|
||||
if n > f.available() {
|
||||
panic("internal error: took too much")
|
||||
}
|
||||
f.n -= n
|
||||
if f.conn != nil {
|
||||
f.conn.n -= n
|
||||
}
|
||||
}
|
||||
|
||||
// add adds n bytes (positive or negative) to the flow control window.
|
||||
// It returns false if the sum would exceed 2^31-1.
|
||||
func (f *flow) add(n int32) bool {
|
||||
sum := f.n + n
|
||||
if (sum > n) == (f.n > 0) {
|
||||
f.n = sum
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
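To see how the per-stream and connection-level windows interact, here is a runnable standalone copy of the arithmetic above (flow itself is unexported, so this sketch re-declares it):

package main

import "fmt"

// flow is re-declared here so the two-level window arithmetic can be run
// outside the package.
type flow struct {
	n    int32 // bytes this window still allows
	conn *flow // shared connection-level window, nil on the conn itself
}

func (f *flow) available() int32 {
	n := f.n
	if f.conn != nil && f.conn.n < n {
		n = f.conn.n
	}
	return n
}

func (f *flow) take(n int32) {
	f.n -= n
	if f.conn != nil {
		f.conn.n -= n
	}
}

func main() {
	conn := &flow{n: 100}            // connection-level window
	st := &flow{n: 1000, conn: conn} // per-stream window sharing the conn window
	fmt.Println(st.available())      // 100: capped by the smaller conn window
	st.take(60)
	fmt.Println(st.available(), conn.n) // 40 40: both windows shrank together
}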
1614
vendor/golang.org/x/net/http2/frame.go
generated
vendored
Normal file
File diff suppressed because it is too large
29
vendor/golang.org/x/net/http2/go111.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.11
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
)
|
||||
|
||||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
|
||||
return trace != nil && trace.WroteHeaderField != nil
|
||||
}
|
||||
|
||||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
|
||||
if trace != nil && trace.WroteHeaderField != nil {
|
||||
trace.WroteHeaderField(k, []string{v})
|
||||
}
|
||||
}
|
||||
|
||||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
|
||||
if trace != nil {
|
||||
return trace.Got1xxResponse
|
||||
}
|
||||
return nil
|
||||
}
|
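These helpers fire hooks supplied through net/http/httptrace. A client-side sketch that installs the two hooks used above; the URL is a placeholder:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	trace := &httptrace.ClientTrace{
		// Called via traceWroteHeaderField above as the client writes headers.
		WroteHeaderField: func(key string, values []string) {
			fmt.Printf("wrote header %s: %v\n", key, values)
		},
		// Called for informational 1xx responses via traceGot1xxResponseFunc.
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			fmt.Printf("got %d response\n", code)
			return nil
		},
	}

	req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}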
170
vendor/golang.org/x/net/http2/gotrack.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Defensive debug-only utility to track that functions run on the
|
||||
// goroutine that they're supposed to.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
|
||||
|
||||
type goroutineLock uint64
|
||||
|
||||
func newGoroutineLock() goroutineLock {
|
||||
if !DebugGoroutines {
|
||||
return 0
|
||||
}
|
||||
return goroutineLock(curGoroutineID())
|
||||
}
|
||||
|
||||
func (g goroutineLock) check() {
|
||||
if !DebugGoroutines {
|
||||
return
|
||||
}
|
||||
if curGoroutineID() != uint64(g) {
|
||||
panic("running on the wrong goroutine")
|
||||
}
|
||||
}
|
||||
|
||||
func (g goroutineLock) checkNotOn() {
|
||||
if !DebugGoroutines {
|
||||
return
|
||||
}
|
||||
if curGoroutineID() == uint64(g) {
|
||||
panic("running on the wrong goroutine")
|
||||
}
|
||||
}
|
||||
|
||||
var goroutineSpace = []byte("goroutine ")
|
||||
|
||||
func curGoroutineID() uint64 {
|
||||
bp := littleBuf.Get().(*[]byte)
|
||||
defer littleBuf.Put(bp)
|
||||
b := *bp
|
||||
b = b[:runtime.Stack(b, false)]
|
||||
// Parse the 4707 out of "goroutine 4707 ["
|
||||
b = bytes.TrimPrefix(b, goroutineSpace)
|
||||
i := bytes.IndexByte(b, ' ')
|
||||
if i < 0 {
|
||||
panic(fmt.Sprintf("No space found in %q", b))
|
||||
}
|
||||
b = b[:i]
|
||||
n, err := parseUintBytes(b, 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
var littleBuf = sync.Pool{
|
||||
New: func() interface{} {
|
||||
buf := make([]byte, 64)
|
||||
return &buf
|
||||
},
|
||||
}
|
||||
|
||||
// parseUintBytes is like strconv.ParseUint, but using a []byte.
|
||||
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
|
||||
var cutoff, maxVal uint64
|
||||
|
||||
if bitSize == 0 {
|
||||
bitSize = int(strconv.IntSize)
|
||||
}
|
||||
|
||||
s0 := s
|
||||
switch {
|
||||
case len(s) < 1:
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
|
||||
case 2 <= base && base <= 36:
|
||||
// valid base; nothing to do
|
||||
|
||||
case base == 0:
|
||||
// Look for octal, hex prefix.
|
||||
switch {
|
||||
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
|
||||
base = 16
|
||||
s = s[2:]
|
||||
if len(s) < 1 {
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
case s[0] == '0':
|
||||
base = 8
|
||||
default:
|
||||
base = 10
|
||||
}
|
||||
|
||||
default:
|
||||
err = errors.New("invalid base " + strconv.Itoa(base))
|
||||
goto Error
|
||||
}
|
||||
|
||||
n = 0
|
||||
cutoff = cutoff64(base)
|
||||
maxVal = 1<<uint(bitSize) - 1
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
var v byte
|
||||
d := s[i]
|
||||
switch {
|
||||
case '0' <= d && d <= '9':
|
||||
v = d - '0'
|
||||
case 'a' <= d && d <= 'z':
|
||||
v = d - 'a' + 10
|
||||
case 'A' <= d && d <= 'Z':
|
||||
v = d - 'A' + 10
|
||||
default:
|
||||
n = 0
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
if int(v) >= base {
|
||||
n = 0
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
|
||||
if n >= cutoff {
|
||||
// n*base overflows
|
||||
n = 1<<64 - 1
|
||||
err = strconv.ErrRange
|
||||
goto Error
|
||||
}
|
||||
n *= uint64(base)
|
||||
|
||||
n1 := n + uint64(v)
|
||||
if n1 < n || n1 > maxVal {
|
||||
// n+v overflows
|
||||
n = 1<<64 - 1
|
||||
err = strconv.ErrRange
|
||||
goto Error
|
||||
}
|
||||
n = n1
|
||||
}
|
||||
|
||||
return n, nil
|
||||
|
||||
Error:
|
||||
return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
|
||||
}
|
||||
|
||||
// Return the first number n such that n*base >= 1<<64.
|
||||
func cutoff64(base int) uint64 {
|
||||
if base < 2 {
|
||||
return 0
|
||||
}
|
||||
return (1<<64-1)/uint64(base) + 1
|
||||
}
|
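curGoroutineID above parses the numeric ID out of the first line of a stack dump, which looks like "goroutine 4707 [running]:". A standalone sketch of the same debug-only technique using strconv instead of the package's parseUintBytes; the function name is illustrative.

package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// goroutineID parses the current goroutine's ID from runtime.Stack output.
// Debug-only technique; not intended for production control flow.
func goroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	if i := bytes.IndexByte(buf, ' '); i >= 0 {
		buf = buf[:i]
	}
	id, _ := strconv.ParseUint(string(buf), 10, 64)
	return id
}

func main() {
	fmt.Println("running on goroutine", goroutineID())
}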
88
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"net/http"
	"strings"
	"sync"
)

var (
	commonBuildOnce   sync.Once
	commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
	commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
)

func buildCommonHeaderMapsOnce() {
	commonBuildOnce.Do(buildCommonHeaderMaps)
}

func buildCommonHeaderMaps() {
	common := []string{
		"accept",
		"accept-charset",
		"accept-encoding",
		"accept-language",
		"accept-ranges",
		"age",
		"access-control-allow-origin",
		"allow",
		"authorization",
		"cache-control",
		"content-disposition",
		"content-encoding",
		"content-language",
		"content-length",
		"content-location",
		"content-range",
		"content-type",
		"cookie",
		"date",
		"etag",
		"expect",
		"expires",
		"from",
		"host",
		"if-match",
		"if-modified-since",
		"if-none-match",
		"if-unmodified-since",
		"last-modified",
		"link",
		"location",
		"max-forwards",
		"proxy-authenticate",
		"proxy-authorization",
		"range",
		"referer",
		"refresh",
		"retry-after",
		"server",
		"set-cookie",
		"strict-transport-security",
		"trailer",
		"transfer-encoding",
		"user-agent",
		"vary",
		"via",
		"www-authenticate",
	}
	commonLowerHeader = make(map[string]string, len(common))
	commonCanonHeader = make(map[string]string, len(common))
	for _, v := range common {
		chk := http.CanonicalHeaderKey(v)
		commonLowerHeader[chk] = v
		commonCanonHeader[v] = chk
	}
}

func lowerHeader(v string) string {
	buildCommonHeaderMapsOnce()
	if s, ok := commonLowerHeader[v]; ok {
		return s
	}
	return strings.ToLower(v)
}
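lowerHeader maps Go's canonical header casing back to the lowercase form HTTP/2 requires on the wire; the common-header maps just make that lookup cheap. A tiny sketch of the same mapping with the standard library alone.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// net/http canonicalizes header keys for the Header map...
	fmt.Println(http.CanonicalHeaderKey("content-type")) // Content-Type
	// ...but HTTP/2 requires lowercase field names when encoding.
	fmt.Println(strings.ToLower("Content-Type")) // content-type
}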
240
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
Normal file
@ -0,0 +1,240 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
uint32Max = ^uint32(0)
|
||||
initialHeaderTableSize = 4096
|
||||
)
|
||||
|
||||
type Encoder struct {
|
||||
dynTab dynamicTable
|
||||
// minSize is the minimum table size set by
|
||||
// SetMaxDynamicTableSize after the previous Header Table Size
|
||||
// Update.
|
||||
minSize uint32
|
||||
// maxSizeLimit is the maximum table size this encoder
|
||||
// supports. This will protect the encoder from too large
|
||||
// size.
|
||||
maxSizeLimit uint32
|
||||
// tableSizeUpdate indicates whether "Header Table Size
|
||||
// Update" is required.
|
||||
tableSizeUpdate bool
|
||||
w io.Writer
|
||||
buf []byte
|
||||
}
|
||||
|
||||
// NewEncoder returns a new Encoder which performs HPACK encoding.
// The encoded data is written to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
e := &Encoder{
|
||||
minSize: uint32Max,
|
||||
maxSizeLimit: initialHeaderTableSize,
|
||||
tableSizeUpdate: false,
|
||||
w: w,
|
||||
}
|
||||
e.dynTab.table.init()
|
||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||
return e
|
||||
}
|
||||
|
||||
// WriteField encodes f into a single Write to e's underlying Writer.
|
||||
// This function may also produce bytes for "Header Table Size Update"
|
||||
// if necessary. If produced, it is done before encoding f.
|
||||
func (e *Encoder) WriteField(f HeaderField) error {
|
||||
e.buf = e.buf[:0]
|
||||
|
||||
if e.tableSizeUpdate {
|
||||
e.tableSizeUpdate = false
|
||||
if e.minSize < e.dynTab.maxSize {
|
||||
e.buf = appendTableSize(e.buf, e.minSize)
|
||||
}
|
||||
e.minSize = uint32Max
|
||||
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
|
||||
}
|
||||
|
||||
idx, nameValueMatch := e.searchTable(f)
|
||||
if nameValueMatch {
|
||||
e.buf = appendIndexed(e.buf, idx)
|
||||
} else {
|
||||
indexing := e.shouldIndex(f)
|
||||
if indexing {
|
||||
e.dynTab.add(f)
|
||||
}
|
||||
|
||||
if idx == 0 {
|
||||
e.buf = appendNewName(e.buf, f, indexing)
|
||||
} else {
|
||||
e.buf = appendIndexedName(e.buf, f, idx, indexing)
|
||||
}
|
||||
}
|
||||
n, err := e.w.Write(e.buf)
|
||||
if err == nil && n != len(e.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// searchTable searches f in both the static and dynamic header tables.
|
||||
// The static header table is searched first. Only when there is no
|
||||
// exact match for both name and value, the dynamic header table is
|
||||
// then searched. If there is no match, i is 0. If both name and value
|
||||
// match, i is the matched index and nameValueMatch becomes true. If
|
||||
// only name matches, i points to that index and nameValueMatch
|
||||
// becomes false.
|
||||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
i, nameValueMatch = staticTable.search(f)
|
||||
if nameValueMatch {
|
||||
return i, true
|
||||
}
|
||||
|
||||
j, nameValueMatch := e.dynTab.table.search(f)
|
||||
if nameValueMatch || (i == 0 && j != 0) {
|
||||
return j + uint64(staticTable.len()), nameValueMatch
|
||||
}
|
||||
|
||||
return i, false
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
||||
// The actual size is bounded by the value passed to
|
||||
// SetMaxDynamicTableSizeLimit.
|
||||
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
|
||||
if v > e.maxSizeLimit {
|
||||
v = e.maxSizeLimit
|
||||
}
|
||||
if v < e.minSize {
|
||||
e.minSize = v
|
||||
}
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
|
||||
// specified in SetMaxDynamicTableSize to v. By default, it is set to
|
||||
// 4096, which is the same size of the default dynamic header table
|
||||
// size described in HPACK specification. If the current maximum
|
||||
// dynamic header table size is strictly greater than v, "Header Table
|
||||
// Size Update" will be done in the next WriteField call and the
|
||||
// maximum dynamic header table size is truncated to v.
|
||||
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
|
||||
e.maxSizeLimit = v
|
||||
if e.dynTab.maxSize > v {
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldIndex reports whether f should be indexed.
|
||||
func (e *Encoder) shouldIndex(f HeaderField) bool {
|
||||
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendIndexed(dst []byte, i uint64) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, i)
|
||||
dst[first] |= 0x80
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendNewName appends f, as encoded in one of "Literal Header field
|
||||
// - New Name" representation variants, to dst and returns the
|
||||
// extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
|
||||
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
|
||||
dst = appendHpackString(dst, f.Name)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendIndexedName appends f and index i referring indexed name
|
||||
// entry, as encoded in one of "Literal Header field - Indexed Name"
|
||||
// representation variants, to dst and returns the extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
|
||||
first := len(dst)
|
||||
var n byte
|
||||
if indexing {
|
||||
n = 6
|
||||
} else {
|
||||
n = 4
|
||||
}
|
||||
dst = appendVarInt(dst, n, i)
|
||||
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendTableSize appends v, as encoded in "Header Table Size Update"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendTableSize(dst []byte, v uint32) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 5, uint64(v))
|
||||
dst[first] |= 0x20
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendVarInt appends i, as encoded in variable integer form using n
|
||||
// bit prefix, to dst and returns the extended buffer.
|
||||
//
|
||||
// See
|
||||
// http://http2.github.io/http2-spec/compression.html#integer.representation
|
||||
func appendVarInt(dst []byte, n byte, i uint64) []byte {
|
||||
k := uint64((1 << n) - 1)
|
||||
if i < k {
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
dst = append(dst, byte(k))
|
||||
i -= k
|
||||
for ; i >= 128; i >>= 7 {
|
||||
dst = append(dst, byte(0x80|(i&0x7f)))
|
||||
}
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
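appendVarInt above implements the prefixed-integer encoding of RFC 7541 section 5.1. A standalone sketch of the same logic, worked for the value 1337 with a 5-bit prefix, the example given in the spec's appendix C.1.2; the helper name encodePrefixed is illustrative.

package main

import "fmt"

// encodePrefixed encodes i with an n-bit prefix per RFC 7541 section 5.1,
// mirroring the appendVarInt logic above (without the OR-ed type bits).
func encodePrefixed(n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return []byte{byte(i)}
	}
	out := []byte{byte(k)} // prefix saturated: emit all ones
	i -= k
	for ; i >= 128; i >>= 7 {
		out = append(out, byte(0x80|(i&0x7f))) // 7 bits per byte, continuation bit set
	}
	return append(out, byte(i))
}

func main() {
	// 1337 with a 5-bit prefix encodes as 1f 9a 0a (31, 154, 10).
	fmt.Printf("% x\n", encodePrefixed(5, 1337))
}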
|
||||
// appendHpackString appends s, as encoded in "String Literal"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
//
|
||||
// s will be encoded in Huffman codes only when it produces a strictly
// shorter byte string.
|
||||
func appendHpackString(dst []byte, s string) []byte {
|
||||
huffmanLength := HuffmanEncodeLength(s)
|
||||
if huffmanLength < uint64(len(s)) {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, huffmanLength)
|
||||
dst = AppendHuffmanString(dst, s)
|
||||
dst[first] |= 0x80
|
||||
} else {
|
||||
dst = appendVarInt(dst, 7, uint64(len(s)))
|
||||
dst = append(dst, s...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// encodeTypeByte returns type byte. If sensitive is true, type byte
|
||||
// for "Never Indexed" representation is returned. If sensitive is
|
||||
// false and indexing is true, type byte for "Incremental Indexing"
|
||||
// representation is returned. Otherwise, type byte for "Without
|
||||
// Indexing" is returned.
|
||||
func encodeTypeByte(indexing, sensitive bool) byte {
|
||||
if sensitive {
|
||||
return 0x10
|
||||
}
|
||||
if indexing {
|
||||
return 0x40
|
||||
}
|
||||
return 0
|
||||
}
|
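A minimal sketch of the exported Encoder API defined in this file, writing two fields into a buffer; the header values are arbitrary examples.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// ":method: GET" is in the static table, so it encodes to a single byte.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	// Sensitive fields are emitted as "never indexed" literals.
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})
	fmt.Printf("encoded %d bytes: % x\n", buf.Len(), buf.Bytes())
}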
504
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
Normal file
@ -0,0 +1,504 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package hpack implements HPACK, a compression format for
|
||||
// efficiently representing HTTP header fields in the context of HTTP/2.
|
||||
//
|
||||
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A DecodingError is something the spec defines as a decoding error.
|
||||
type DecodingError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (de DecodingError) Error() string {
|
||||
return fmt.Sprintf("decoding error: %v", de.Err)
|
||||
}
|
||||
|
||||
// An InvalidIndexError is returned when an encoder references a table
|
||||
// entry before the static table or after the end of the dynamic table.
|
||||
type InvalidIndexError int
|
||||
|
||||
func (e InvalidIndexError) Error() string {
|
||||
return fmt.Sprintf("invalid indexed representation index %d", int(e))
|
||||
}
|
||||
|
||||
// A HeaderField is a name-value pair. Both the name and value are
|
||||
// treated as opaque sequences of octets.
|
||||
type HeaderField struct {
|
||||
Name, Value string
|
||||
|
||||
// Sensitive means that this header field should never be
|
||||
// indexed.
|
||||
Sensitive bool
|
||||
}
|
||||
|
||||
// IsPseudo reports whether the header field is an http2 pseudo header.
|
||||
// That is, it reports whether it starts with a colon.
|
||||
// It is not otherwise guaranteed to be a valid pseudo header field,
|
||||
// though.
|
||||
func (hf HeaderField) IsPseudo() bool {
|
||||
return len(hf.Name) != 0 && hf.Name[0] == ':'
|
||||
}
|
||||
|
||||
func (hf HeaderField) String() string {
|
||||
var suffix string
|
||||
if hf.Sensitive {
|
||||
suffix = " (sensitive)"
|
||||
}
|
||||
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
|
||||
}
|
||||
|
||||
// Size returns the size of an entry per RFC 7541 section 4.1.
|
||||
func (hf HeaderField) Size() uint32 {
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
|
||||
// "The size of the dynamic table is the sum of the size of
|
||||
// its entries. The size of an entry is the sum of its name's
|
||||
// length in octets (as defined in Section 5.2), its value's
|
||||
// length in octets (see Section 5.2), plus 32. The size of
|
||||
// an entry is calculated using the length of the name and
|
||||
// value without any Huffman encoding applied."
|
||||
|
||||
// This can overflow if somebody makes a large HeaderField
|
||||
// Name and/or Value by hand, but we don't care, because that
|
||||
// won't happen on the wire because the encoding doesn't allow
|
||||
// it.
|
||||
return uint32(len(hf.Name) + len(hf.Value) + 32)
|
||||
}
|
||||
|
||||
// A Decoder is the decoding context for incremental processing of
|
||||
// header blocks.
|
||||
type Decoder struct {
|
||||
dynTab dynamicTable
|
||||
emit func(f HeaderField)
|
||||
|
||||
emitEnabled bool // whether calls to emit are enabled
|
||||
maxStrLen int // 0 means unlimited
|
||||
|
||||
// buf is the unparsed buffer. It's only written to
|
||||
// saveBuf if it was truncated in the middle of a header
|
||||
// block. Because it's usually not owned, we can only
|
||||
// process it under Write.
|
||||
buf []byte // not owned; only valid during Write
|
||||
|
||||
// saveBuf is previous data passed to Write which we weren't able
|
||||
// to fully parse before. Unlike buf, we own this data.
|
||||
saveBuf bytes.Buffer
|
||||
|
||||
firstField bool // processing the first field of the header block
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder with the provided maximum dynamic
|
||||
// table size. The emitFunc will be called for each valid field
|
||||
// parsed, in the same goroutine as calls to Write, before Write returns.
|
||||
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
|
||||
d := &Decoder{
|
||||
emit: emitFunc,
|
||||
emitEnabled: true,
|
||||
firstField: true,
|
||||
}
|
||||
d.dynTab.table.init()
|
||||
d.dynTab.allowedMaxSize = maxDynamicTableSize
|
||||
d.dynTab.setMaxSize(maxDynamicTableSize)
|
||||
return d
|
||||
}
|
||||
|
||||
// ErrStringLength is returned by Decoder.Write when the max string length
|
||||
// (as configured by Decoder.SetMaxStringLength) would be violated.
|
||||
var ErrStringLength = errors.New("hpack: string too long")
|
||||
|
||||
// SetMaxStringLength sets the maximum size of a HeaderField name or
|
||||
// value string. If a string exceeds this length (even after any
|
||||
// decompression), Write will return ErrStringLength.
|
||||
// A value of 0 means unlimited and is the default from NewDecoder.
|
||||
func (d *Decoder) SetMaxStringLength(n int) {
|
||||
d.maxStrLen = n
|
||||
}
|
||||
|
||||
// SetEmitFunc changes the callback used when new header fields
|
||||
// are decoded.
|
||||
// It must be non-nil. It does not affect EmitEnabled.
|
||||
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
|
||||
d.emit = emitFunc
|
||||
}
|
||||
|
||||
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
|
||||
// should be called. The default is true.
|
||||
//
|
||||
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
|
||||
// while still decoding and keeping in-sync with decoder state, but
|
||||
// without doing unnecessary decompression or generating unnecessary
|
||||
// garbage for header fields past the limit.
|
||||
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
|
||||
|
||||
// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
|
||||
// are currently enabled. The default is true.
|
||||
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
|
||||
|
||||
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
|
||||
// underlying buffers for garbage reasons.
|
||||
|
||||
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
|
||||
// stream (via dynamic table size updates) may set the maximum size
|
||||
// to.
|
||||
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.allowedMaxSize = v
|
||||
}
|
||||
|
||||
type dynamicTable struct {
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
|
||||
table headerFieldTable
|
||||
size uint32 // in bytes
|
||||
maxSize uint32 // current maxSize
|
||||
allowedMaxSize uint32 // maxSize may go up to this, inclusive
|
||||
}
|
||||
|
||||
func (dt *dynamicTable) setMaxSize(v uint32) {
|
||||
dt.maxSize = v
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
func (dt *dynamicTable) add(f HeaderField) {
|
||||
dt.table.addEntry(f)
|
||||
dt.size += f.Size()
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
// If we're too big, evict old stuff.
|
||||
func (dt *dynamicTable) evict() {
|
||||
var n int
|
||||
for dt.size > dt.maxSize && n < dt.table.len() {
|
||||
dt.size -= dt.table.ents[n].Size()
|
||||
n++
|
||||
}
|
||||
dt.table.evictOldest(n)
|
||||
}
|
||||
|
||||
func (d *Decoder) maxTableIndex() int {
|
||||
// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
|
||||
// the dynamic table to 2^32 bytes, where each entry will occupy more than
|
||||
// one byte. Further, the staticTable has a fixed, small length.
|
||||
return d.dynTab.table.len() + staticTable.len()
|
||||
}
|
||||
|
||||
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
|
||||
// See Section 2.3.3.
|
||||
if i == 0 {
|
||||
return
|
||||
}
|
||||
if i <= uint64(staticTable.len()) {
|
||||
return staticTable.ents[i-1], true
|
||||
}
|
||||
if i > uint64(d.maxTableIndex()) {
|
||||
return
|
||||
}
|
||||
// In the dynamic table, newer entries have lower indices.
|
||||
// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
|
||||
// the reversed dynamic table.
|
||||
dt := d.dynTab.table
|
||||
return dt.ents[dt.len()-(int(i)-staticTable.len())], true
|
||||
}
|
||||
|
||||
// Decode decodes an entire block.
|
||||
//
|
||||
// TODO: remove this method and make it incremental later? This is
|
||||
// easier for debugging now.
|
||||
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
|
||||
var hf []HeaderField
|
||||
saveFunc := d.emit
|
||||
defer func() { d.emit = saveFunc }()
|
||||
d.emit = func(f HeaderField) { hf = append(hf, f) }
|
||||
if _, err := d.Write(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := d.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hf, nil
|
||||
}
|
||||
|
||||
// Close declares that the decoding is complete and resets the Decoder
|
||||
// to be reused again for a new header block. If there is any remaining
|
||||
// data in the decoder's buffer, Close returns an error.
|
||||
func (d *Decoder) Close() error {
|
||||
if d.saveBuf.Len() > 0 {
|
||||
d.saveBuf.Reset()
|
||||
return DecodingError{errors.New("truncated headers")}
|
||||
}
|
||||
d.firstField = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) Write(p []byte) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
// Prevent state machine CPU attacks (making us redo
|
||||
// work up to the point of finding out we don't have
|
||||
// enough data)
|
||||
return
|
||||
}
|
||||
// Only copy the data if we have to. Optimistically assume
|
||||
// that p will contain a complete header block.
|
||||
if d.saveBuf.Len() == 0 {
|
||||
d.buf = p
|
||||
} else {
|
||||
d.saveBuf.Write(p)
|
||||
d.buf = d.saveBuf.Bytes()
|
||||
d.saveBuf.Reset()
|
||||
}
|
||||
|
||||
for len(d.buf) > 0 {
|
||||
err = d.parseHeaderFieldRepr()
|
||||
if err == errNeedMore {
|
||||
// Extra paranoia, making sure saveBuf won't
|
||||
// get too large. All the varint and string
|
||||
// reading code earlier should already catch
|
||||
// overlong things and return ErrStringLength,
|
||||
// but keep this as a last resort.
|
||||
const varIntOverhead = 8 // conservative
|
||||
if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
|
||||
return 0, ErrStringLength
|
||||
}
|
||||
d.saveBuf.Write(d.buf)
|
||||
return len(p), nil
|
||||
}
|
||||
d.firstField = false
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return len(p), err
|
||||
}
|
||||
|
||||
// errNeedMore is an internal sentinel error value that means the
|
||||
// buffer is truncated and we need to read more data before we can
|
||||
// continue parsing.
|
||||
var errNeedMore = errors.New("need more data")
|
||||
|
||||
type indexType int
|
||||
|
||||
const (
|
||||
indexedTrue indexType = iota
|
||||
indexedFalse
|
||||
indexedNever
|
||||
)
|
||||
|
||||
func (v indexType) indexed() bool { return v == indexedTrue }
|
||||
func (v indexType) sensitive() bool { return v == indexedNever }
|
||||
|
||||
// returns errNeedMore if there isn't enough data available.
|
||||
// any other error is fatal.
|
||||
// consumes d.buf iff it returns nil.
|
||||
// precondition: must be called with len(d.buf) > 0
|
||||
func (d *Decoder) parseHeaderFieldRepr() error {
|
||||
b := d.buf[0]
|
||||
switch {
|
||||
case b&128 != 0:
|
||||
// Indexed representation.
|
||||
// High bit set?
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
|
||||
return d.parseFieldIndexed()
|
||||
case b&192 == 64:
|
||||
// 6.2.1 Literal Header Field with Incremental Indexing
|
||||
// 0b01xxxxxx: top two bits are 01
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
|
||||
return d.parseFieldLiteral(6, indexedTrue)
|
||||
case b&240 == 0:
|
||||
// 6.2.2 Literal Header Field without Indexing
|
||||
// 0b0000xxxx: top four bits are 0000
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
|
||||
return d.parseFieldLiteral(4, indexedFalse)
|
||||
case b&240 == 16:
|
||||
// 6.2.3 Literal Header Field never Indexed
|
||||
// 0b0001xxxx: top four bits are 0001
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
|
||||
return d.parseFieldLiteral(4, indexedNever)
|
||||
case b&224 == 32:
|
||||
// 6.3 Dynamic Table Size Update
|
||||
// Top three bits are '001'.
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
|
||||
return d.parseDynamicTableSizeUpdate()
|
||||
}
|
||||
|
||||
return DecodingError{errors.New("invalid encoding")}
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldIndexed() error {
|
||||
buf := d.buf
|
||||
idx, buf, err := readVarInt(7, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hf, ok := d.at(idx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(idx)}
|
||||
}
|
||||
d.buf = buf
|
||||
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
|
||||
buf := d.buf
|
||||
nameIdx, buf, err := readVarInt(n, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var hf HeaderField
|
||||
wantStr := d.emitEnabled || it.indexed()
|
||||
if nameIdx > 0 {
|
||||
ihf, ok := d.at(nameIdx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(nameIdx)}
|
||||
}
|
||||
hf.Name = ihf.Name
|
||||
} else {
|
||||
hf.Name, buf, err = d.readString(buf, wantStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
hf.Value, buf, err = d.readString(buf, wantStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.buf = buf
|
||||
if it.indexed() {
|
||||
d.dynTab.add(hf)
|
||||
}
|
||||
hf.Sensitive = it.sensitive()
|
||||
return d.callEmit(hf)
|
||||
}
|
||||
|
||||
func (d *Decoder) callEmit(hf HeaderField) error {
|
||||
if d.maxStrLen != 0 {
|
||||
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
}
|
||||
if d.emitEnabled {
|
||||
d.emit(hf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseDynamicTableSizeUpdate() error {
|
||||
// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
|
||||
// beginning of the first header block following the change to the dynamic table size.
|
||||
if !d.firstField && d.dynTab.size > 0 {
|
||||
return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
|
||||
}
|
||||
|
||||
buf := d.buf
|
||||
size, buf, err := readVarInt(5, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size > uint64(d.dynTab.allowedMaxSize) {
|
||||
return DecodingError{errors.New("dynamic table size update too large")}
|
||||
}
|
||||
d.dynTab.setMaxSize(uint32(size))
|
||||
d.buf = buf
|
||||
return nil
|
||||
}
|
||||
|
||||
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
|
||||
|
||||
// readVarInt reads an unsigned variable length integer off the
|
||||
// beginning of p. n is the parameter as described in
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
|
||||
//
|
||||
// n must always be between 1 and 8.
|
||||
//
|
||||
// The returned remain buffer is either a smaller suffix of p, or err != nil.
|
||||
// The error is errNeedMore if p doesn't contain a complete integer.
|
||||
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
|
||||
if n < 1 || n > 8 {
|
||||
panic("bad n")
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, p, errNeedMore
|
||||
}
|
||||
i = uint64(p[0])
|
||||
if n < 8 {
|
||||
i &= (1 << uint64(n)) - 1
|
||||
}
|
||||
if i < (1<<uint64(n))-1 {
|
||||
return i, p[1:], nil
|
||||
}
|
||||
|
||||
origP := p
|
||||
p = p[1:]
|
||||
var m uint64
|
||||
for len(p) > 0 {
|
||||
b := p[0]
|
||||
p = p[1:]
|
||||
i += uint64(b&127) << m
|
||||
if b&128 == 0 {
|
||||
return i, p, nil
|
||||
}
|
||||
m += 7
|
||||
if m >= 63 { // TODO: proper overflow check. making this up.
|
||||
return 0, origP, errVarintOverflow
|
||||
}
|
||||
}
|
||||
return 0, origP, errNeedMore
|
||||
}
|
||||
|
||||
// readString decodes an hpack string from p.
|
||||
//
|
||||
// wantStr is whether s will be used. If false, decompression and
|
||||
// []byte->string garbage are skipped if s will be ignored
|
||||
// anyway. This does mean that huffman decoding errors for non-indexed
|
||||
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
|
||||
// is returning an error anyway, and because they're not indexed, the error
|
||||
// won't affect the decoding state.
|
||||
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
|
||||
if len(p) == 0 {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
isHuff := p[0]&128 != 0
|
||||
strLen, p, err := readVarInt(7, p)
|
||||
if err != nil {
|
||||
return "", p, err
|
||||
}
|
||||
if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
|
||||
return "", nil, ErrStringLength
|
||||
}
|
||||
if uint64(len(p)) < strLen {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
if !isHuff {
|
||||
if wantStr {
|
||||
s = string(p[:strLen])
|
||||
}
|
||||
return s, p[strLen:], nil
|
||||
}
|
||||
|
||||
if wantStr {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset() // don't trust others
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
|
||||
buf.Reset()
|
||||
return "", nil, err
|
||||
}
|
||||
s = buf.String()
|
||||
buf.Reset() // be nice to GC
|
||||
}
|
||||
return s, p[strLen:], nil
|
||||
}
|
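A minimal sketch of the exported Decoder API defined in this file. The three bytes 0x82, 0x86 and 0x84 are indexed representations of static-table entries 2, 6 and 4, i.e. ":method: GET", ":scheme: http" and ":path: /".

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	// Indexed representations referencing the static table.
	if _, err := dec.Write([]byte{0x82, 0x86, 0x84}); err != nil {
		fmt.Println("decode error:", err)
	}
	if err := dec.Close(); err != nil {
		fmt.Println("close error:", err)
	}
	// Prints:
	// :method: GET
	// :scheme: http
	// :path: /
}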
229
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} { return new(bytes.Buffer) },
|
||||
}
|
||||
|
||||
// HuffmanDecode decodes the string in v and writes the expanded
|
||||
// result to w, returning the number of bytes written to w and the
|
||||
// Write call's return value. At most one Write call is made.
|
||||
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// HuffmanDecodeToString decodes the string in v.
|
||||
func HuffmanDecodeToString(v []byte) (string, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ErrInvalidHuffman is returned for errors found decoding
|
||||
// Huffman-encoded strings.
|
||||
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
||||
|
||||
// huffmanDecode decodes v to buf.
|
||||
// If maxLen is greater than 0, attempts to write more to buf than
|
||||
// maxLen bytes will return ErrStringLength.
|
||||
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
|
||||
rootHuffmanNode := getRootHuffmanNode()
|
||||
n := rootHuffmanNode
|
||||
// cur is the bit buffer that has not been fed into n.
|
||||
// cbits is the number of low order bits in cur that are valid.
|
||||
// sbits is the number of bits of the symbol prefix being decoded.
|
||||
cur, cbits, sbits := uint(0), uint8(0), uint8(0)
|
||||
for _, b := range v {
|
||||
cur = cur<<8 | uint(b)
|
||||
cbits += 8
|
||||
sbits += 8
|
||||
for cbits >= 8 {
|
||||
idx := byte(cur >> (cbits - 8))
|
||||
n = n.children[idx]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children == nil {
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
} else {
|
||||
cbits -= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
for cbits > 0 {
|
||||
n = n.children[byte(cur<<(8-cbits))]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children != nil || n.codeLen > cbits {
|
||||
break
|
||||
}
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
}
|
||||
if sbits > 7 {
|
||||
// Either there was an incomplete symbol, or overlong padding.
|
||||
// Both are decoding errors per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if mask := uint(1<<cbits - 1); cur&mask != mask {
|
||||
// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
type incomparable [0]func()
|
||||
|
||||
type node struct {
|
||||
_ incomparable
|
||||
|
||||
// children is non-nil for internal nodes
|
||||
children *[256]*node
|
||||
|
||||
// The following are only valid if children is nil:
|
||||
codeLen uint8 // number of bits that led to the output of sym
|
||||
sym byte // output symbol
|
||||
}
|
||||
|
||||
func newInternalNode() *node {
|
||||
return &node{children: new([256]*node)}
|
||||
}
|
||||
|
||||
var (
|
||||
buildRootOnce sync.Once
|
||||
lazyRootHuffmanNode *node
|
||||
)
|
||||
|
||||
func getRootHuffmanNode() *node {
|
||||
buildRootOnce.Do(buildRootHuffmanNode)
|
||||
return lazyRootHuffmanNode
|
||||
}
|
||||
|
||||
func buildRootHuffmanNode() {
|
||||
if len(huffmanCodes) != 256 {
|
||||
panic("unexpected size")
|
||||
}
|
||||
lazyRootHuffmanNode = newInternalNode()
|
||||
for i, code := range huffmanCodes {
|
||||
addDecoderNode(byte(i), code, huffmanCodeLen[i])
|
||||
}
|
||||
}
|
||||
|
||||
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
|
||||
cur := lazyRootHuffmanNode
|
||||
for codeLen > 8 {
|
||||
codeLen -= 8
|
||||
i := uint8(code >> codeLen)
|
||||
if cur.children[i] == nil {
|
||||
cur.children[i] = newInternalNode()
|
||||
}
|
||||
cur = cur.children[i]
|
||||
}
|
||||
shift := 8 - codeLen
|
||||
start, end := int(uint8(code<<shift)), int(1<<shift)
|
||||
for i := start; i < start+end; i++ {
|
||||
cur.children[i] = &node{sym: sym, codeLen: codeLen}
|
||||
}
|
||||
}
|
||||
|
||||
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
|
||||
// and returns the extended buffer.
|
||||
func AppendHuffmanString(dst []byte, s string) []byte {
|
||||
rembits := uint8(8)
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
if rembits == 8 {
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
|
||||
}
|
||||
|
||||
if rembits < 8 {
|
||||
// special EOS symbol
|
||||
code := uint32(0x3fffffff)
|
||||
nbits := uint8(30)
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// HuffmanEncodeLength returns the number of bytes required to encode
|
||||
// s in Huffman codes. The result is rounded up to a byte boundary.
|
||||
func HuffmanEncodeLength(s string) uint64 {
|
||||
n := uint64(0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
n += uint64(huffmanCodeLen[s[i]])
|
||||
}
|
||||
return (n + 7) / 8
|
||||
}
|
||||
|
||||
// appendByteToHuffmanCode appends Huffman code for c to dst and
|
||||
// returns the extended buffer and the remaining bits in the last
|
||||
// element. The appending is not byte aligned and the remaining bits
|
||||
// in the last element of dst is given in rembits.
|
||||
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
|
||||
code := huffmanCodes[c]
|
||||
nbits := huffmanCodeLen[c]
|
||||
|
||||
for {
|
||||
if rembits > nbits {
|
||||
t := uint8(code << (rembits - nbits))
|
||||
dst[len(dst)-1] |= t
|
||||
rembits -= nbits
|
||||
break
|
||||
}
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
|
||||
nbits -= rembits
|
||||
rembits = 8
|
||||
|
||||
if nbits == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
|
||||
return dst, rembits
|
||||
}
|
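A small sketch of the exported Huffman helpers in this file: estimating the encoded length, encoding, and decoding back. The sample string is arbitrary.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com" // arbitrary sample string
	fmt.Println("huffman length:", hpack.HuffmanEncodeLength(s), "raw length:", len(s))

	encoded := hpack.AppendHuffmanString(nil, s)
	decoded, err := hpack.HuffmanDecodeToString(encoded)
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("round trip ok: %q (%d bytes encoded)\n", decoded, len(encoded))
}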
479
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
Normal file
@ -0,0 +1,479 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// headerFieldTable implements a list of HeaderFields.
|
||||
// This is used to implement the static and dynamic tables.
|
||||
type headerFieldTable struct {
|
||||
// For static tables, entries are never evicted.
|
||||
//
|
||||
// For dynamic tables, entries are evicted from ents[0] and added to the end.
|
||||
// Each entry has a unique id that starts at one and increments for each
|
||||
// entry that is added. This unique id is stable across evictions, meaning
|
||||
// it can be used as a pointer to a specific entry. As in hpack, unique ids
|
||||
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
|
||||
//
|
||||
// Zero is not a valid unique id.
|
||||
//
|
||||
// evictCount should not overflow in any remotely practical situation. In
|
||||
// practice, we will have one dynamic table per HTTP/2 connection. If we
|
||||
// assume a very powerful server that handles 1M QPS per connection and each
|
||||
// request adds (then evicts) 100 entries from the table, it would still take
|
||||
// 2M years for evictCount to overflow.
|
||||
ents []HeaderField
|
||||
evictCount uint64
|
||||
|
||||
// byName maps a HeaderField name to the unique id of the newest entry with
|
||||
// the same name. See above for a definition of "unique id".
|
||||
byName map[string]uint64
|
||||
|
||||
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
|
||||
// entry with the same name and value. See above for a definition of "unique id".
|
||||
byNameValue map[pairNameValue]uint64
|
||||
}
|
||||
|
||||
type pairNameValue struct {
|
||||
name, value string
|
||||
}
|
||||
|
||||
func (t *headerFieldTable) init() {
|
||||
t.byName = make(map[string]uint64)
|
||||
t.byNameValue = make(map[pairNameValue]uint64)
|
||||
}
|
||||
|
||||
// len reports the number of entries in the table.
|
||||
func (t *headerFieldTable) len() int {
|
||||
return len(t.ents)
|
||||
}
|
||||
|
||||
// addEntry adds a new entry.
|
||||
func (t *headerFieldTable) addEntry(f HeaderField) {
|
||||
id := uint64(t.len()) + t.evictCount + 1
|
||||
t.byName[f.Name] = id
|
||||
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
|
||||
t.ents = append(t.ents, f)
|
||||
}
|
||||
|
||||
// evictOldest evicts the n oldest entries in the table.
|
||||
func (t *headerFieldTable) evictOldest(n int) {
|
||||
if n > t.len() {
|
||||
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
|
||||
}
|
||||
for k := 0; k < n; k++ {
|
||||
f := t.ents[k]
|
||||
id := t.evictCount + uint64(k) + 1
|
||||
if t.byName[f.Name] == id {
|
||||
delete(t.byName, f.Name)
|
||||
}
|
||||
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
|
||||
delete(t.byNameValue, p)
|
||||
}
|
||||
}
|
||||
copy(t.ents, t.ents[n:])
|
||||
for k := t.len() - n; k < t.len(); k++ {
|
||||
t.ents[k] = HeaderField{} // so strings can be garbage collected
|
||||
}
|
||||
t.ents = t.ents[:t.len()-n]
|
||||
if t.evictCount+uint64(n) < t.evictCount {
|
||||
panic("evictCount overflow")
|
||||
}
|
||||
t.evictCount += uint64(n)
|
||||
}
|
||||
|
||||
// search finds f in the table. If there is no match, i is 0.
|
||||
// If both name and value match, i is the matched index and nameValueMatch
|
||||
// becomes true. If only name matches, i points to that index and
|
||||
// nameValueMatch becomes false.
|
||||
//
|
||||
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
|
||||
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
|
||||
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
|
||||
// table, the return value i actually refers to the entry t.ents[t.len()-i].
|
||||
//
|
||||
// All tables are assumed to be dynamic tables except for the global
|
||||
// staticTable pointer.
|
||||
//
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
if !f.Sensitive {
|
||||
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
|
||||
return t.idToIndex(id), true
|
||||
}
|
||||
}
|
||||
if id := t.byName[f.Name]; id != 0 {
|
||||
return t.idToIndex(id), false
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// idToIndex converts a unique id to an HPACK index.
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
|
||||
if id <= t.evictCount {
|
||||
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
|
||||
}
|
||||
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
|
||||
if t != staticTable {
|
||||
return uint64(t.len()) - k // dynamic table
|
||||
}
|
||||
return k + 1
|
||||
}
|
||||
|
||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||
var staticTable = newStaticTable()
|
||||
var staticTableEntries = [...]HeaderField{
|
||||
{Name: ":authority"},
|
||||
{Name: ":method", Value: "GET"},
|
||||
{Name: ":method", Value: "POST"},
|
||||
{Name: ":path", Value: "/"},
|
||||
{Name: ":path", Value: "/index.html"},
|
||||
{Name: ":scheme", Value: "http"},
|
||||
{Name: ":scheme", Value: "https"},
|
||||
{Name: ":status", Value: "200"},
|
||||
{Name: ":status", Value: "204"},
|
||||
{Name: ":status", Value: "206"},
|
||||
{Name: ":status", Value: "304"},
|
||||
{Name: ":status", Value: "400"},
|
||||
{Name: ":status", Value: "404"},
|
||||
{Name: ":status", Value: "500"},
|
||||
{Name: "accept-charset"},
|
||||
{Name: "accept-encoding", Value: "gzip, deflate"},
|
||||
{Name: "accept-language"},
|
||||
{Name: "accept-ranges"},
|
||||
{Name: "accept"},
|
||||
{Name: "access-control-allow-origin"},
|
||||
{Name: "age"},
|
||||
{Name: "allow"},
|
||||
{Name: "authorization"},
|
||||
{Name: "cache-control"},
|
||||
{Name: "content-disposition"},
|
||||
{Name: "content-encoding"},
|
||||
{Name: "content-language"},
|
||||
{Name: "content-length"},
|
||||
{Name: "content-location"},
|
||||
{Name: "content-range"},
|
||||
{Name: "content-type"},
|
||||
{Name: "cookie"},
|
||||
{Name: "date"},
|
||||
{Name: "etag"},
|
||||
{Name: "expect"},
|
||||
{Name: "expires"},
|
||||
{Name: "from"},
|
||||
{Name: "host"},
|
||||
{Name: "if-match"},
|
||||
{Name: "if-modified-since"},
|
||||
{Name: "if-none-match"},
|
||||
{Name: "if-range"},
|
||||
{Name: "if-unmodified-since"},
|
||||
{Name: "last-modified"},
|
||||
{Name: "link"},
|
||||
{Name: "location"},
|
||||
{Name: "max-forwards"},
|
||||
{Name: "proxy-authenticate"},
|
||||
{Name: "proxy-authorization"},
|
||||
{Name: "range"},
|
||||
{Name: "referer"},
|
||||
{Name: "refresh"},
|
||||
{Name: "retry-after"},
|
||||
{Name: "server"},
|
||||
{Name: "set-cookie"},
|
||||
{Name: "strict-transport-security"},
|
||||
{Name: "transfer-encoding"},
|
||||
{Name: "user-agent"},
|
||||
{Name: "vary"},
|
||||
{Name: "via"},
|
||||
{Name: "www-authenticate"},
|
||||
}
|
||||
|
||||
func newStaticTable() *headerFieldTable {
|
||||
t := &headerFieldTable{}
|
||||
t.init()
|
||||
for _, e := range staticTableEntries[:] {
|
||||
t.addEntry(e)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
var huffmanCodes = [256]uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
0xfffffe2,
|
||||
0xfffffe3,
|
||||
0xfffffe4,
|
||||
0xfffffe5,
|
||||
0xfffffe6,
|
||||
0xfffffe7,
|
||||
0xfffffe8,
|
||||
0xffffea,
|
||||
0x3ffffffc,
|
||||
0xfffffe9,
|
||||
0xfffffea,
|
||||
0x3ffffffd,
|
||||
0xfffffeb,
|
||||
0xfffffec,
|
||||
0xfffffed,
|
||||
0xfffffee,
|
||||
0xfffffef,
|
||||
0xffffff0,
|
||||
0xffffff1,
|
||||
0xffffff2,
|
||||
0x3ffffffe,
|
||||
0xffffff3,
|
||||
0xffffff4,
|
||||
0xffffff5,
|
||||
0xffffff6,
|
||||
0xffffff7,
|
||||
0xffffff8,
|
||||
0xffffff9,
|
||||
0xffffffa,
|
||||
0xffffffb,
|
||||
0x14,
|
||||
0x3f8,
|
||||
0x3f9,
|
||||
0xffa,
|
||||
0x1ff9,
|
||||
0x15,
|
||||
0xf8,
|
||||
0x7fa,
|
||||
0x3fa,
|
||||
0x3fb,
|
||||
0xf9,
|
||||
0x7fb,
|
||||
0xfa,
|
||||
0x16,
|
||||
0x17,
|
||||
0x18,
|
||||
0x0,
|
||||
0x1,
|
||||
0x2,
|
||||
0x19,
|
||||
0x1a,
|
||||
0x1b,
|
||||
0x1c,
|
||||
0x1d,
|
||||
0x1e,
|
||||
0x1f,
|
||||
0x5c,
|
||||
0xfb,
|
||||
0x7ffc,
|
||||
0x20,
|
||||
0xffb,
|
||||
0x3fc,
|
||||
0x1ffa,
|
||||
0x21,
|
||||
0x5d,
|
||||
0x5e,
|
||||
0x5f,
|
||||
0x60,
|
||||
0x61,
|
||||
0x62,
|
||||
0x63,
|
||||
0x64,
|
||||
0x65,
|
||||
0x66,
|
||||
0x67,
|
||||
0x68,
|
||||
0x69,
|
||||
0x6a,
|
||||
0x6b,
|
||||
0x6c,
|
||||
0x6d,
|
||||
0x6e,
|
||||
0x6f,
|
||||
0x70,
|
||||
0x71,
|
||||
0x72,
|
||||
0xfc,
|
||||
0x73,
|
||||
0xfd,
|
||||
0x1ffb,
|
||||
0x7fff0,
|
||||
0x1ffc,
|
||||
0x3ffc,
|
||||
0x22,
|
||||
0x7ffd,
|
||||
0x3,
|
||||
0x23,
|
||||
0x4,
|
||||
0x24,
|
||||
0x5,
|
||||
0x25,
|
||||
0x26,
|
||||
0x27,
|
||||
0x6,
|
||||
0x74,
|
||||
0x75,
|
||||
0x28,
|
||||
0x29,
|
||||
0x2a,
|
||||
0x7,
|
||||
0x2b,
|
||||
0x76,
|
||||
0x2c,
|
||||
0x8,
|
||||
0x9,
|
||||
0x2d,
|
||||
0x77,
|
||||
0x78,
|
||||
0x79,
|
||||
0x7a,
|
||||
0x7b,
|
||||
0x7ffe,
|
||||
0x7fc,
|
||||
0x3ffd,
|
||||
0x1ffd,
|
||||
0xffffffc,
|
||||
0xfffe6,
|
||||
0x3fffd2,
|
||||
0xfffe7,
|
||||
0xfffe8,
|
||||
0x3fffd3,
|
||||
0x3fffd4,
|
||||
0x3fffd5,
|
||||
0x7fffd9,
|
||||
0x3fffd6,
|
||||
0x7fffda,
|
||||
0x7fffdb,
|
||||
0x7fffdc,
|
||||
0x7fffdd,
|
||||
0x7fffde,
|
||||
0xffffeb,
|
||||
0x7fffdf,
|
||||
0xffffec,
|
||||
0xffffed,
|
||||
0x3fffd7,
|
||||
0x7fffe0,
|
||||
0xffffee,
|
||||
0x7fffe1,
|
||||
0x7fffe2,
|
||||
0x7fffe3,
|
||||
0x7fffe4,
|
||||
0x1fffdc,
|
||||
0x3fffd8,
|
||||
0x7fffe5,
|
||||
0x3fffd9,
|
||||
0x7fffe6,
|
||||
0x7fffe7,
|
||||
0xffffef,
|
||||
0x3fffda,
|
||||
0x1fffdd,
|
||||
0xfffe9,
|
||||
0x3fffdb,
|
||||
0x3fffdc,
|
||||
0x7fffe8,
|
||||
0x7fffe9,
|
||||
0x1fffde,
|
||||
0x7fffea,
|
||||
0x3fffdd,
|
||||
0x3fffde,
|
||||
0xfffff0,
|
||||
0x1fffdf,
|
||||
0x3fffdf,
|
||||
0x7fffeb,
|
||||
0x7fffec,
|
||||
0x1fffe0,
|
||||
0x1fffe1,
|
||||
0x3fffe0,
|
||||
0x1fffe2,
|
||||
0x7fffed,
|
||||
0x3fffe1,
|
||||
0x7fffee,
|
||||
0x7fffef,
|
||||
0xfffea,
|
||||
0x3fffe2,
|
||||
0x3fffe3,
|
||||
0x3fffe4,
|
||||
0x7ffff0,
|
||||
0x3fffe5,
|
||||
0x3fffe6,
|
||||
0x7ffff1,
|
||||
0x3ffffe0,
|
||||
0x3ffffe1,
|
||||
0xfffeb,
|
||||
0x7fff1,
|
||||
0x3fffe7,
|
||||
0x7ffff2,
|
||||
0x3fffe8,
|
||||
0x1ffffec,
|
||||
0x3ffffe2,
|
||||
0x3ffffe3,
|
||||
0x3ffffe4,
|
||||
0x7ffffde,
|
||||
0x7ffffdf,
|
||||
0x3ffffe5,
|
||||
0xfffff1,
|
||||
0x1ffffed,
|
||||
0x7fff2,
|
||||
0x1fffe3,
|
||||
0x3ffffe6,
|
||||
0x7ffffe0,
|
||||
0x7ffffe1,
|
||||
0x3ffffe7,
|
||||
0x7ffffe2,
|
||||
0xfffff2,
|
||||
0x1fffe4,
|
||||
0x1fffe5,
|
||||
0x3ffffe8,
|
||||
0x3ffffe9,
|
||||
0xffffffd,
|
||||
0x7ffffe3,
|
||||
0x7ffffe4,
|
||||
0x7ffffe5,
|
||||
0xfffec,
|
||||
0xfffff3,
|
||||
0xfffed,
|
||||
0x1fffe6,
|
||||
0x3fffe9,
|
||||
0x1fffe7,
|
||||
0x1fffe8,
|
||||
0x7ffff3,
|
||||
0x3fffea,
|
||||
0x3fffeb,
|
||||
0x1ffffee,
|
||||
0x1ffffef,
|
||||
0xfffff4,
|
||||
0xfffff5,
|
||||
0x3ffffea,
|
||||
0x7ffff4,
|
||||
0x3ffffeb,
|
||||
0x7ffffe6,
|
||||
0x3ffffec,
|
||||
0x3ffffed,
|
||||
0x7ffffe7,
|
||||
0x7ffffe8,
|
||||
0x7ffffe9,
|
||||
0x7ffffea,
|
||||
0x7ffffeb,
|
||||
0xffffffe,
|
||||
0x7ffffec,
|
||||
0x7ffffed,
|
||||
0x7ffffee,
|
||||
0x7ffffef,
|
||||
0x7fffff0,
|
||||
0x3ffffee,
|
||||
}
|
||||
|
||||
var huffmanCodeLen = [256]uint8{
|
||||
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
|
||||
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
|
||||
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
|
||||
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
|
||||
13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
|
||||
7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
|
||||
15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
|
||||
6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
|
||||
20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
|
||||
24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
|
||||
22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
|
||||
21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
|
||||
26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
|
||||
19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
|
||||
20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
|
||||
26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
|
||||
}
|
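The dynamic table above gives every entry a stable "unique id" (id = k + evictCount + 1) and converts it to a 1-based HPACK index on demand (index = len - k), so the newest entry is always index 1. A standalone sketch of that arithmetic under those stated formulas; dynIndex is an illustrative name, not part of the vendored file.

package main

import "fmt"

// dynIndex converts an entry's stable unique id into its current 1-based
// HPACK index for a dynamic table, mirroring idToIndex above.
func dynIndex(id, evictCount uint64, tableLen int) uint64 {
	k := id - evictCount - 1 // position in ents, oldest first
	return uint64(tableLen) - k
}

func main() {
	// Table holds ids 1..3 (no evictions): the newest entry (id 3) is
	// index 1 and id 2 is index 2.
	fmt.Println(dynIndex(3, 0, 3), dynIndex(2, 0, 3)) // 1 2
	// Adding a fourth entry pushes id 2 down to index 3, while its id
	// stays stable.
	fmt.Println(dynIndex(2, 0, 4)) // 3
}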
385
vendor/golang.org/x/net/http2/http2.go
generated
vendored
Normal file
@ -0,0 +1,385 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package http2 implements the HTTP/2 protocol.
|
||||
//
|
||||
// This package is low-level and intended to be used directly by very
|
||||
// few people. Most users will use it indirectly through the automatic
|
||||
// use by the net/http package (from Go 1.6 and later).
|
||||
// For use in earlier Go versions see ConfigureServer. (Transport support
|
||||
// requires Go 1.6 or later)
|
||||
//
|
||||
// See https://http2.github.io/ for more information on HTTP/2.
|
||||
//
|
||||
// See https://http2.golang.org/ for a test server running this code.
|
||||
//
|
||||
package http2 // import "golang.org/x/net/http2"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
)
|
||||
|
||||
var (
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
inTests bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
e := os.Getenv("GODEBUG")
|
||||
if strings.Contains(e, "http2debug=1") {
|
||||
VerboseLogs = true
|
||||
}
|
||||
if strings.Contains(e, "http2debug=2") {
|
||||
VerboseLogs = true
|
||||
logFrameWrites = true
|
||||
logFrameReads = true
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// ClientPreface is the string that must be sent by new
|
||||
// connections from clients.
|
||||
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
|
||||
|
||||
// SETTINGS_MAX_FRAME_SIZE default
|
||||
// http://http2.github.io/http2-spec/#rfc.section.6.5.2
|
||||
initialMaxFrameSize = 16384
|
||||
|
||||
// NextProtoTLS is the NPN/ALPN protocol negotiated during
|
||||
// HTTP/2's TLS setup.
|
||||
NextProtoTLS = "h2"
|
||||
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
initialHeaderTableSize = 4096
|
||||
|
||||
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
|
||||
|
||||
defaultMaxReadFrameSize = 1 << 20
|
||||
)
|
||||
|
||||
var (
|
||||
clientPreface = []byte(ClientPreface)
|
||||
)
|
||||
|
||||
type streamState int
|
||||
|
||||
// HTTP/2 stream states.
|
||||
//
|
||||
// See http://tools.ietf.org/html/rfc7540#section-5.1.
|
||||
//
|
||||
// For simplicity, the server code merges "reserved (local)" into
|
||||
// "half-closed (remote)". This is one less state transition to track.
|
||||
// The only downside is that we send PUSH_PROMISEs slightly less
|
||||
// liberally than allowable. More discussion here:
|
||||
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
|
||||
//
|
||||
// "reserved (remote)" is omitted since the client code does not
|
||||
// support server push.
|
||||
const (
|
||||
stateIdle streamState = iota
|
||||
stateOpen
|
||||
stateHalfClosedLocal
|
||||
stateHalfClosedRemote
|
||||
stateClosed
|
||||
)
|
||||
|
||||
var stateName = [...]string{
|
||||
stateIdle: "Idle",
|
||||
stateOpen: "Open",
|
||||
stateHalfClosedLocal: "HalfClosedLocal",
|
||||
stateHalfClosedRemote: "HalfClosedRemote",
|
||||
stateClosed: "Closed",
|
||||
}
|
||||
|
||||
func (st streamState) String() string {
|
||||
return stateName[st]
|
||||
}
|
||||
|
||||
// Setting is a setting parameter: which setting it is, and its value.
|
||||
type Setting struct {
|
||||
// ID is which setting is being set.
|
||||
// See http://http2.github.io/http2-spec/#SettingValues
|
||||
ID SettingID
|
||||
|
||||
// Val is the value.
|
||||
Val uint32
|
||||
}
|
||||
|
||||
func (s Setting) String() string {
|
||||
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
|
||||
}
|
||||
|
||||
// Valid reports whether the setting is valid.
|
||||
func (s Setting) Valid() error {
|
||||
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
|
||||
switch s.ID {
|
||||
case SettingEnablePush:
|
||||
if s.Val != 1 && s.Val != 0 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
case SettingInitialWindowSize:
|
||||
if s.Val > 1<<31-1 {
|
||||
return ConnectionError(ErrCodeFlowControl)
|
||||
}
|
||||
case SettingMaxFrameSize:
|
||||
if s.Val < 16384 || s.Val > 1<<24-1 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
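An editor's sketch of the exported Setting API above; the printed forms assume the Setting.String and SettingID.String methods defined in this file.
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	ok := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20}  // within [16384, 1<<24-1]
	bad := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 30} // above 1<<24-1
	fmt.Println(ok, ok.Valid())   // [MAX_FRAME_SIZE = 1048576] <nil>
	fmt.Println(bad, bad.Valid()) // [MAX_FRAME_SIZE = 1073741824] and a non-nil connection error
}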
|
||||
// A SettingID is an HTTP/2 setting as defined in
|
||||
// http://http2.github.io/http2-spec/#iana-settings
|
||||
type SettingID uint16
|
||||
|
||||
const (
|
||||
SettingHeaderTableSize SettingID = 0x1
|
||||
SettingEnablePush SettingID = 0x2
|
||||
SettingMaxConcurrentStreams SettingID = 0x3
|
||||
SettingInitialWindowSize SettingID = 0x4
|
||||
SettingMaxFrameSize SettingID = 0x5
|
||||
SettingMaxHeaderListSize SettingID = 0x6
|
||||
)
|
||||
|
||||
var settingName = map[SettingID]string{
|
||||
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
|
||||
SettingEnablePush: "ENABLE_PUSH",
|
||||
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
|
||||
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
|
||||
SettingMaxFrameSize: "MAX_FRAME_SIZE",
|
||||
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
|
||||
}
|
||||
|
||||
func (s SettingID) String() string {
|
||||
if v, ok := settingName[s]; ok {
|
||||
return v
|
||||
}
|
||||
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
|
||||
}
|
||||
|
||||
// validWireHeaderFieldName reports whether v is a valid header field
|
||||
// name (key). See httpguts.ValidHeaderFieldName for the base rules.
|
||||
//
|
||||
// Further, http2 says:
|
||||
// "Just as in HTTP/1.x, header field names are strings of ASCII
|
||||
// characters that are compared in a case-insensitive
|
||||
// fashion. However, header field names MUST be converted to
|
||||
// lowercase prior to their encoding in HTTP/2. "
|
||||
func validWireHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
if !httpguts.IsTokenRune(r) {
|
||||
return false
|
||||
}
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
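An editor's standalone sketch of the same rule, assuming the exported httpguts.ValidHeaderFieldName helper plus the HTTP/2 lowercase requirement (validWireHeaderFieldName itself is unexported).
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/http/httpguts"
)

// validHTTP2FieldName mirrors the check above: a non-empty token with no
// uppercase ASCII letters.
func validHTTP2FieldName(v string) bool {
	return v != "" && httpguts.ValidHeaderFieldName(v) && strings.ToLower(v) == v
}

func main() {
	fmt.Println(validHTTP2FieldName("content-type")) // true
	fmt.Println(validHTTP2FieldName("Content-Type")) // false: uppercase is rejected on the wire
}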
|
||||
func httpCodeString(code int) string {
|
||||
switch code {
|
||||
case 200:
|
||||
return "200"
|
||||
case 404:
|
||||
return "404"
|
||||
}
|
||||
return strconv.Itoa(code)
|
||||
}
|
||||
|
||||
// from pkg io
|
||||
type stringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// A gate lets two goroutines coordinate their activities.
|
||||
type gate chan struct{}
|
||||
|
||||
func (g gate) Done() { g <- struct{}{} }
|
||||
func (g gate) Wait() { <-g }
|
||||
|
||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
||||
type closeWaiter chan struct{}
|
||||
|
||||
// Init makes a closeWaiter usable.
|
||||
// It exists so that a closeWaiter value can be placed inside a
|
||||
// larger struct and have the Mutex and Cond's memory in the same
|
||||
// allocation.
|
||||
func (cw *closeWaiter) Init() {
|
||||
*cw = make(chan struct{})
|
||||
}
|
||||
|
||||
// Close marks the closeWaiter as closed and unblocks any waiters.
|
||||
func (cw closeWaiter) Close() {
|
||||
close(cw)
|
||||
}
|
||||
|
||||
// Wait waits for the closeWaiter to become closed.
|
||||
func (cw closeWaiter) Wait() {
|
||||
<-cw
|
||||
}
|
||||
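An editor's standalone sketch of the gate pattern above: an unbuffered channel where each Wait pairs with exactly one Done.
package main

import "fmt"

type gate chan struct{}

func (g gate) Done() { g <- struct{}{} }
func (g gate) Wait() { <-g }

func main() {
	g := make(gate)
	go func() {
		fmt.Println("worker done")
		g.Done()
	}()
	g.Wait() // blocks until the worker calls Done
	fmt.Println("main continues")
}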
|
||||
// bufferedWriter is a buffered writer that writes to w.
|
||||
// Its buffered writer is lazily allocated as needed, to minimize
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
_ incomparable
|
||||
w io.Writer // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
}
|
||||
|
||||
func newBufferedWriter(w io.Writer) *bufferedWriter {
|
||||
return &bufferedWriter{w: w}
|
||||
}
|
||||
|
||||
// bufWriterPoolBufferSize is the size of bufio.Writer's
|
||||
// buffers created using bufWriterPool.
|
||||
//
|
||||
// TODO: pick a less arbitrary value? this is a bit under
|
||||
// (3 x typical 1500 byte MTU) at least. Other than that,
|
||||
// not much thought went into it.
|
||||
const bufWriterPoolBufferSize = 4 << 10
|
||||
|
||||
var bufWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
|
||||
},
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Available() int {
|
||||
if w.bw == nil {
|
||||
return bufWriterPoolBufferSize
|
||||
}
|
||||
return w.bw.Available()
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
|
||||
if w.bw == nil {
|
||||
bw := bufWriterPool.Get().(*bufio.Writer)
|
||||
bw.Reset(w.w)
|
||||
w.bw = bw
|
||||
}
|
||||
return w.bw.Write(p)
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Flush() error {
|
||||
bw := w.bw
|
||||
if bw == nil {
|
||||
return nil
|
||||
}
|
||||
err := bw.Flush()
|
||||
bw.Reset(nil)
|
||||
bufWriterPool.Put(bw)
|
||||
w.bw = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func mustUint31(v int32) uint32 {
|
||||
if v < 0 || v > 2147483647 {
|
||||
panic("out of range")
|
||||
}
|
||||
return uint32(v)
|
||||
}
|
||||
|
||||
// bodyAllowedForStatus reports whether a given response status code
|
||||
// permits a body. See RFC 7230, section 3.3.
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
return false
|
||||
case status == 204:
|
||||
return false
|
||||
case status == 304:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type httpError struct {
|
||||
_ incomparable
|
||||
msg string
|
||||
timeout bool
|
||||
}
|
||||
|
||||
func (e *httpError) Error() string { return e.msg }
|
||||
func (e *httpError) Timeout() bool { return e.timeout }
|
||||
func (e *httpError) Temporary() bool { return true }
|
||||
|
||||
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
|
||||
|
||||
type connectionStater interface {
|
||||
ConnectionState() tls.ConnectionState
|
||||
}
|
||||
|
||||
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
|
||||
|
||||
type sorter struct {
|
||||
v []string // owned by sorter
|
||||
}
|
||||
|
||||
func (s *sorter) Len() int { return len(s.v) }
|
||||
func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
|
||||
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
|
||||
|
||||
// Keys returns the sorted keys of h.
|
||||
//
|
||||
// The returned slice is only valid until s is used again or returned to
|
||||
// its pool.
|
||||
func (s *sorter) Keys(h http.Header) []string {
|
||||
keys := s.v[:0]
|
||||
for k := range h {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
s.v = keys
|
||||
sort.Sort(s)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (s *sorter) SortStrings(ss []string) {
|
||||
// Our sorter works on s.v, which sorter owns, so
|
||||
// stash it away while we sort the user's buffer.
|
||||
save := s.v
|
||||
s.v = ss
|
||||
sort.Sort(s)
|
||||
s.v = save
|
||||
}
|
||||
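An editor's sketch of what sorter.Keys yields for an http.Header: the keys in sorted order, the deterministic order later used when HPACK-encoding headers (shown here with sort.Strings, since sorter is unexported).
package main

import (
	"fmt"
	"net/http"
	"sort"
)

func main() {
	h := http.Header{}
	h.Set("Content-Type", "text/plain")
	h.Set("Cache-Control", "no-store")
	h.Set("Age", "0")

	keys := make([]string, 0, len(h))
	for k := range h {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	fmt.Println(keys) // [Age Cache-Control Content-Type]
}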
|
||||
// validPseudoPath reports whether v is a valid :path pseudo-header
|
||||
// value. It must be either:
|
||||
//
|
||||
// *) a non-empty string starting with '/'
|
||||
// *) the string '*', for OPTIONS requests.
|
||||
//
|
||||
// For now this is only used as a quick check for deciding when to clean
|
||||
// up Opaque URLs before sending requests from the Transport.
|
||||
// See golang.org/issue/16847
|
||||
//
|
||||
// We used to enforce that the path also didn't start with "//", but
|
||||
// Google's GFE accepts such paths and Chrome sends them, so ignore
|
||||
// that part of the spec. See golang.org/issue/19103.
|
||||
func validPseudoPath(v string) bool {
|
||||
return (len(v) > 0 && v[0] == '/') || v == "*"
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
type incomparable [0]func()
|
20
vendor/golang.org/x/net/http2/not_go111.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.11
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
)
|
||||
|
||||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
|
||||
|
||||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
|
||||
|
||||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
|
||||
return nil
|
||||
}
|
168
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
|
||||
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
|
||||
// underlying buffer is an interface. (io.Pipe is always unbuffered)
|
||||
type pipe struct {
|
||||
mu sync.Mutex
|
||||
c sync.Cond // c.L lazily initialized to &p.mu
|
||||
b pipeBuffer // nil when done reading
|
||||
unread int // bytes unread when done
|
||||
err error // read error once empty. non-nil means closed.
|
||||
breakErr error // immediate read error (caller doesn't see rest of b)
|
||||
donec chan struct{} // closed on error
|
||||
readFn func() // optional code to run in Read before error
|
||||
}
|
||||
|
||||
type pipeBuffer interface {
|
||||
Len() int
|
||||
io.Writer
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func (p *pipe) Len() int {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.b == nil {
|
||||
return p.unread
|
||||
}
|
||||
return p.b.Len()
|
||||
}
|
||||
|
||||
// Read waits until data is available and copies bytes
|
||||
// from the buffer into d.
|
||||
func (p *pipe) Read(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
for {
|
||||
if p.breakErr != nil {
|
||||
return 0, p.breakErr
|
||||
}
|
||||
if p.b != nil && p.b.Len() > 0 {
|
||||
return p.b.Read(d)
|
||||
}
|
||||
if p.err != nil {
|
||||
if p.readFn != nil {
|
||||
p.readFn() // e.g. copy trailers
|
||||
p.readFn = nil // not sticky like p.err
|
||||
}
|
||||
p.b = nil
|
||||
return 0, p.err
|
||||
}
|
||||
p.c.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
var errClosedPipeWrite = errors.New("write on closed buffer")
|
||||
|
||||
// Write copies bytes from d into the buffer and wakes a reader.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (p *pipe) Write(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if p.err != nil {
|
||||
return 0, errClosedPipeWrite
|
||||
}
|
||||
if p.breakErr != nil {
|
||||
p.unread += len(d)
|
||||
return len(d), nil // discard when there is no reader
|
||||
}
|
||||
return p.b.Write(d)
|
||||
}
|
||||
|
||||
// CloseWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err after all data has been
|
||||
// read.
|
||||
//
|
||||
// The error must be non-nil.
|
||||
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
|
||||
|
||||
// BreakWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err immediately, without
|
||||
// waiting for unread data.
|
||||
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
|
||||
|
||||
// closeWithErrorAndCode is like CloseWithError but also sets some code to run
|
||||
// in the caller's goroutine before returning the error.
|
||||
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
|
||||
|
||||
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
|
||||
if err == nil {
|
||||
panic("err must be non-nil")
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if *dst != nil {
|
||||
// Already been done.
|
||||
return
|
||||
}
|
||||
p.readFn = fn
|
||||
if dst == &p.breakErr {
|
||||
if p.b != nil {
|
||||
p.unread += p.b.Len()
|
||||
}
|
||||
p.b = nil
|
||||
}
|
||||
*dst = err
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
|
||||
// requires p.mu be held.
|
||||
func (p *pipe) closeDoneLocked() {
|
||||
if p.donec == nil {
|
||||
return
|
||||
}
|
||||
// Close if unclosed. This isn't racy since we always
|
||||
// hold p.mu while closing.
|
||||
select {
|
||||
case <-p.donec:
|
||||
default:
|
||||
close(p.donec)
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
|
||||
func (p *pipe) Err() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.breakErr != nil {
|
||||
return p.breakErr
|
||||
}
|
||||
return p.err
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed if and when this pipe is closed
|
||||
// with CloseWithError.
|
||||
func (p *pipe) Done() <-chan struct{} {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.donec == nil {
|
||||
p.donec = make(chan struct{})
|
||||
if p.err != nil || p.breakErr != nil {
|
||||
// Already hit an error.
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
}
|
||||
return p.donec
|
||||
}
|
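An editor's sketch of the CloseWithError contract using the standard library's io.Pipe; the http2 pipe above behaves similarly, except that it is buffered and delivers the error only after buffered data has been drained.
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	pr, pw := io.Pipe()
	go func() {
		pw.Write([]byte("body bytes"))
		pw.CloseWithError(errors.New("stream reset"))
	}()
	buf := make([]byte, 32)
	for {
		n, err := pr.Read(buf)
		fmt.Printf("read %q err=%v\n", buf[:n], err)
		if err != nil {
			return // second read reports the error set by CloseWithError
		}
	}
}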
2968
vendor/golang.org/x/net/http2/server.go
generated
vendored
Normal file
File diff suppressed because it is too large
2760
vendor/golang.org/x/net/http2/transport.go
generated
vendored
Normal file
File diff suppressed because it is too large
365
vendor/golang.org/x/net/http2/write.go
generated
vendored
Normal file
@ -0,0 +1,365 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
// writeFramer is implemented by any type that is used to write frames.
|
||||
type writeFramer interface {
|
||||
writeFrame(writeContext) error
|
||||
|
||||
// staysWithinBuffer reports whether this writer promises that
|
||||
// it will only write less than or equal to size bytes, and it
|
||||
// won't Flush the write context.
|
||||
staysWithinBuffer(size int) bool
|
||||
}
|
||||
|
||||
// writeContext is the interface needed by the various frame writer
|
||||
// types below. All the writeFrame methods below are scheduled via the
|
||||
// frame writing scheduler (see writeScheduler in writesched.go).
|
||||
//
|
||||
// This interface is implemented by *serverConn.
|
||||
//
|
||||
// TODO: decide whether to a) use this in the client code (which didn't
|
||||
// end up using this yet, because it has a simpler design, not
|
||||
// currently implementing priorities), or b) delete this and
|
||||
// make the server code a bit more concrete.
|
||||
type writeContext interface {
|
||||
Framer() *Framer
|
||||
Flush() error
|
||||
CloseConn() error
|
||||
// HeaderEncoder returns an HPACK encoder that writes to the
|
||||
// returned buffer.
|
||||
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
|
||||
}
|
||||
|
||||
// writeEndsStream reports whether w writes a frame that will transition
|
||||
// the stream to a half-closed local state. This returns false for RST_STREAM,
|
||||
// which closes the entire stream (not just the local half).
|
||||
func writeEndsStream(w writeFramer) bool {
|
||||
switch v := w.(type) {
|
||||
case *writeData:
|
||||
return v.endStream
|
||||
case *writeResHeaders:
|
||||
return v.endStream
|
||||
case nil:
|
||||
// This can only happen if the caller reuses w after it's
|
||||
// been intentionally nil'ed out to prevent use. Keep this
|
||||
// here to catch future refactoring breaking it.
|
||||
panic("writeEndsStream called on nil writeFramer")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type flushFrameWriter struct{}
|
||||
|
||||
func (flushFrameWriter) writeFrame(ctx writeContext) error {
|
||||
return ctx.Flush()
|
||||
}
|
||||
|
||||
func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
|
||||
|
||||
type writeSettings []Setting
|
||||
|
||||
func (s writeSettings) staysWithinBuffer(max int) bool {
|
||||
const settingSize = 6 // uint16 + uint32
|
||||
return frameHeaderLen+settingSize*len(s) <= max
|
||||
|
||||
}
|
||||
|
||||
func (s writeSettings) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettings([]Setting(s)...)
|
||||
}
|
||||
|
||||
type writeGoAway struct {
|
||||
maxStreamID uint32
|
||||
code ErrCode
|
||||
}
|
||||
|
||||
func (p *writeGoAway) writeFrame(ctx writeContext) error {
|
||||
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
|
||||
ctx.Flush() // ignore error: we're hanging up on them anyway
|
||||
return err
|
||||
}
|
||||
|
||||
func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
|
||||
|
||||
type writeData struct {
|
||||
streamID uint32
|
||||
p []byte
|
||||
endStream bool
|
||||
}
|
||||
|
||||
func (w *writeData) String() string {
|
||||
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
|
||||
}
|
||||
|
||||
func (w *writeData) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
|
||||
}
|
||||
|
||||
func (w *writeData) staysWithinBuffer(max int) bool {
|
||||
return frameHeaderLen+len(w.p) <= max
|
||||
}
|
||||
|
||||
// handlerPanicRST is the message sent from handler goroutines when
|
||||
// the handler panics.
|
||||
type handlerPanicRST struct {
|
||||
StreamID uint32
|
||||
}
|
||||
|
||||
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
|
||||
}
|
||||
|
||||
func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
func (se StreamError) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
|
||||
}
|
||||
|
||||
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
type writePingAck struct{ pf *PingFrame }
|
||||
|
||||
func (w writePingAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WritePing(true, w.pf.Data)
|
||||
}
|
||||
|
||||
func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
|
||||
|
||||
type writeSettingsAck struct{}
|
||||
|
||||
func (writeSettingsAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
|
||||
|
||||
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
|
||||
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
|
||||
// for the first/last fragment, respectively.
|
||||
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
|
||||
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
|
||||
// that all peers must support (16KB). Later we could care
|
||||
// more and send larger frames if the peer advertised it, but
|
||||
// there's little point. Most headers are small anyway (so we
|
||||
// generally won't have CONTINUATION frames), and extra frames
|
||||
// only waste 9 bytes anyway.
|
||||
const maxFrameSize = 16384
|
||||
|
||||
first := true
|
||||
for len(headerBlock) > 0 {
|
||||
frag := headerBlock
|
||||
if len(frag) > maxFrameSize {
|
||||
frag = frag[:maxFrameSize]
|
||||
}
|
||||
headerBlock = headerBlock[len(frag):]
|
||||
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
|
||||
return err
|
||||
}
|
||||
first = false
|
||||
}
|
||||
return nil
|
||||
}
|
||||
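An editor's standalone sketch of the fragmentation contract above: at the minimum MAX_FRAME_SIZE of 16384 bytes, a 40,000-byte header block becomes one HEADERS fragment and two CONTINUATION fragments, with only the last one flagged as final.
package main

import "fmt"

func split(block []byte, maxFrameSize int, fn func(frag []byte, first, last bool)) {
	first := true
	for len(block) > 0 {
		frag := block
		if len(frag) > maxFrameSize {
			frag = frag[:maxFrameSize]
		}
		block = block[len(frag):]
		fn(frag, first, len(block) == 0)
		first = false
	}
}

func main() {
	block := make([]byte, 40000)
	split(block, 16384, func(frag []byte, first, last bool) {
		fmt.Printf("len=%d first=%v last=%v\n", len(frag), first, last)
	})
	// Output:
	// len=16384 first=true last=false
	// len=16384 first=false last=false
	// len=7232 first=false last=true
}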
|
||||
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
|
||||
// for HTTP response headers or trailers from a server handler.
|
||||
type writeResHeaders struct {
|
||||
streamID uint32
|
||||
httpResCode int // 0 means no ":status" line
|
||||
h http.Header // may be nil
|
||||
trailers []string // if non-nil, which keys of h to write. nil means all.
|
||||
endStream bool
|
||||
|
||||
date string
|
||||
contentType string
|
||||
contentLength string
|
||||
}
|
||||
|
||||
func encKV(enc *hpack.Encoder, k, v string) {
|
||||
if VerboseLogs {
|
||||
log.Printf("http2: server encoding header %q = %q", k, v)
|
||||
}
|
||||
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
|
||||
// TODO: this is a common one. It'd be nice to return true
|
||||
// here and get into the fast path if we could be clever and
|
||||
// calculate the size fast enough, or at least a conservative
|
||||
// upper bound that usually fires. (Maybe if w.h and
|
||||
// w.trailers are nil, so we don't need to enumerate it.)
|
||||
// Otherwise I'm afraid that just calculating the length to
|
||||
// answer this question would be slower than the ~2µs benefit.
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
|
||||
if w.httpResCode != 0 {
|
||||
encKV(enc, ":status", httpCodeString(w.httpResCode))
|
||||
}
|
||||
|
||||
encodeHeaders(enc, w.h, w.trailers)
|
||||
|
||||
if w.contentType != "" {
|
||||
encKV(enc, "content-type", w.contentType)
|
||||
}
|
||||
if w.contentLength != "" {
|
||||
encKV(enc, "content-length", w.contentLength)
|
||||
}
|
||||
if w.date != "" {
|
||||
encKV(enc, "date", w.date)
|
||||
}
|
||||
|
||||
headerBlock := buf.Bytes()
|
||||
if len(headerBlock) == 0 && w.trailers == nil {
|
||||
panic("unexpected empty hpack")
|
||||
}
|
||||
|
||||
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
||||
if firstFrag {
|
||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: frag,
|
||||
EndStream: w.endStream,
|
||||
EndHeaders: lastFrag,
|
||||
})
|
||||
} else {
|
||||
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
||||
}
|
||||
}
|
||||
|
||||
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
|
||||
type writePushPromise struct {
|
||||
streamID uint32 // pusher stream
|
||||
method string // for :method
|
||||
url *url.URL // for :scheme, :authority, :path
|
||||
h http.Header
|
||||
|
||||
// Creates an ID for a pushed stream. This runs on serveG just before
|
||||
// the frame is written. The returned ID is copied to promisedID.
|
||||
allocatePromisedID func() (uint32, error)
|
||||
promisedID uint32
|
||||
}
|
||||
|
||||
func (w *writePushPromise) staysWithinBuffer(max int) bool {
|
||||
// TODO: see writeResHeaders.staysWithinBuffer
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *writePushPromise) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
|
||||
encKV(enc, ":method", w.method)
|
||||
encKV(enc, ":scheme", w.url.Scheme)
|
||||
encKV(enc, ":authority", w.url.Host)
|
||||
encKV(enc, ":path", w.url.RequestURI())
|
||||
encodeHeaders(enc, w.h, nil)
|
||||
|
||||
headerBlock := buf.Bytes()
|
||||
if len(headerBlock) == 0 {
|
||||
panic("unexpected empty hpack")
|
||||
}
|
||||
|
||||
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
||||
}
|
||||
|
||||
func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
||||
if firstFrag {
|
||||
return ctx.Framer().WritePushPromise(PushPromiseParam{
|
||||
StreamID: w.streamID,
|
||||
PromiseID: w.promisedID,
|
||||
BlockFragment: frag,
|
||||
EndHeaders: lastFrag,
|
||||
})
|
||||
} else {
|
||||
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
||||
}
|
||||
}
|
||||
|
||||
type write100ContinueHeadersFrame struct {
|
||||
streamID uint32
|
||||
}
|
||||
|
||||
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
encKV(enc, ":status", "100")
|
||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: buf.Bytes(),
|
||||
EndStream: false,
|
||||
EndHeaders: true,
|
||||
})
|
||||
}
|
||||
|
||||
func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
|
||||
// Sloppy but conservative:
|
||||
return 9+2*(len(":status")+len("100")) <= max
|
||||
}
|
||||
|
||||
type writeWindowUpdate struct {
|
||||
streamID uint32 // or 0 for conn-level
|
||||
n uint32
|
||||
}
|
||||
|
||||
func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
|
||||
}
|
||||
|
||||
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
|
||||
// is encoded only if k is in keys.
|
||||
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
|
||||
if keys == nil {
|
||||
sorter := sorterPool.Get().(*sorter)
|
||||
// Using defer here, since the returned keys from the
|
||||
// sorter.Keys method are only valid until the sorter
|
||||
// is returned:
|
||||
defer sorterPool.Put(sorter)
|
||||
keys = sorter.Keys(h)
|
||||
}
|
||||
for _, k := range keys {
|
||||
vv := h[k]
|
||||
k = lowerHeader(k)
|
||||
if !validWireHeaderFieldName(k) {
|
||||
// Skip it as backup paranoia. Per
|
||||
// golang.org/issue/14048, these should
|
||||
// already be rejected at a higher level.
|
||||
continue
|
||||
}
|
||||
isTE := k == "transfer-encoding"
|
||||
for _, v := range vv {
|
||||
if !httpguts.ValidHeaderFieldValue(v) {
|
||||
// TODO: return an error? golang.org/issue/14048
|
||||
// For now just omit it.
|
||||
continue
|
||||
}
|
||||
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
||||
if isTE && v != "trailers" {
|
||||
continue
|
||||
}
|
||||
encKV(enc, k, v)
|
||||
}
|
||||
}
|
||||
}
|
248
vendor/golang.org/x/net/http2/writesched.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import "fmt"
|
||||
|
||||
// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
|
||||
// Methods are never called concurrently.
|
||||
type WriteScheduler interface {
|
||||
// OpenStream opens a new stream in the write scheduler.
|
||||
// It is illegal to call this with streamID=0 or with a streamID that is
|
||||
// already open -- the call may panic.
|
||||
OpenStream(streamID uint32, options OpenStreamOptions)
|
||||
|
||||
// CloseStream closes a stream in the write scheduler. Any frames queued on
|
||||
// this stream should be discarded. It is illegal to call this on a stream
|
||||
// that is not open -- the call may panic.
|
||||
CloseStream(streamID uint32)
|
||||
|
||||
// AdjustStream adjusts the priority of the given stream. This may be called
|
||||
// on a stream that has not yet been opened or has been closed. Note that
|
||||
// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
|
||||
// https://tools.ietf.org/html/rfc7540#section-5.1
|
||||
AdjustStream(streamID uint32, priority PriorityParam)
|
||||
|
||||
// Push queues a frame in the scheduler. In most cases, this will not be
|
||||
// called with wr.StreamID()!=0 unless that stream is currently open. The one
|
||||
// exception is RST_STREAM frames, which may be sent on idle or closed streams.
|
||||
Push(wr FrameWriteRequest)
|
||||
|
||||
// Pop dequeues the next frame to write. Returns false if no frames can
|
||||
// be written. Frames with a given wr.StreamID() are Pop'd in the same
|
||||
// order they are Push'd. No frames should be discarded except by CloseStream.
|
||||
Pop() (wr FrameWriteRequest, ok bool)
|
||||
}
|
||||
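An editor's sketch: both exported scheduler constructors satisfy this interface, and a Server can be pointed at one through its NewWriteScheduler hook (that Server field is an assumption here; it lives in server.go, whose diff is suppressed earlier in this commit).
package main

import "golang.org/x/net/http2"

func main() {
	srv := &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
				MaxClosedNodesInTree:     10,
				MaxIdleNodesInTree:       10,
				ThrottleOutOfOrderWrites: true,
			})
		},
	}
	_ = srv // pass srv to http2.ConfigureServer or use srv.ServeConn directly
}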
|
||||
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
|
||||
type OpenStreamOptions struct {
|
||||
// PusherID is zero if the stream was initiated by the client. Otherwise,
|
||||
// PusherID names the stream that pushed the newly opened stream.
|
||||
PusherID uint32
|
||||
}
|
||||
|
||||
// FrameWriteRequest is a request to write a frame.
|
||||
type FrameWriteRequest struct {
|
||||
// write is the interface value that does the writing, once the
|
||||
// WriteScheduler has selected this frame to write. The write
|
||||
// functions are all defined in write.go.
|
||||
write writeFramer
|
||||
|
||||
// stream is the stream on which this frame will be written.
|
||||
// nil for non-stream frames like PING and SETTINGS.
|
||||
stream *stream
|
||||
|
||||
// done, if non-nil, must be a buffered channel with space for
|
||||
// 1 message and is sent the return value from write (or an
|
||||
// earlier error) when the frame has been written.
|
||||
done chan error
|
||||
}
|
||||
|
||||
// StreamID returns the id of the stream this frame will be written to.
|
||||
// 0 is used for non-stream frames such as PING and SETTINGS.
|
||||
func (wr FrameWriteRequest) StreamID() uint32 {
|
||||
if wr.stream == nil {
|
||||
if se, ok := wr.write.(StreamError); ok {
|
||||
// (*serverConn).resetStream doesn't set
|
||||
// stream because it doesn't necessarily have
|
||||
// one. So special case this type of write
|
||||
// message.
|
||||
return se.StreamID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return wr.stream.id
|
||||
}
|
||||
|
||||
// isControl reports whether wr is a control frame for MaxQueuedControlFrames
|
||||
// purposes. That includes non-stream frames and RST_STREAM frames.
|
||||
func (wr FrameWriteRequest) isControl() bool {
|
||||
return wr.stream == nil
|
||||
}
|
||||
|
||||
// DataSize returns the number of flow control bytes that must be consumed
|
||||
// to write this entire frame. This is 0 for non-DATA frames.
|
||||
func (wr FrameWriteRequest) DataSize() int {
|
||||
if wd, ok := wr.write.(*writeData); ok {
|
||||
return len(wd.p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Consume consumes min(n, available) bytes from this frame, where available
|
||||
// is the number of flow control bytes available on the stream. Consume returns
|
||||
// 0, 1, or 2 frames, where the integer return value gives the number of frames
|
||||
// returned.
|
||||
//
|
||||
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
|
||||
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
|
||||
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
|
||||
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
|
||||
// underlying stream's flow control budget.
|
||||
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
|
||||
var empty FrameWriteRequest
|
||||
|
||||
// Non-DATA frames are always consumed whole.
|
||||
wd, ok := wr.write.(*writeData)
|
||||
if !ok || len(wd.p) == 0 {
|
||||
return wr, empty, 1
|
||||
}
|
||||
|
||||
// Might need to split after applying limits.
|
||||
allowed := wr.stream.flow.available()
|
||||
if n < allowed {
|
||||
allowed = n
|
||||
}
|
||||
if wr.stream.sc.maxFrameSize < allowed {
|
||||
allowed = wr.stream.sc.maxFrameSize
|
||||
}
|
||||
if allowed <= 0 {
|
||||
return empty, empty, 0
|
||||
}
|
||||
if len(wd.p) > int(allowed) {
|
||||
wr.stream.flow.take(allowed)
|
||||
consumed := FrameWriteRequest{
|
||||
stream: wr.stream,
|
||||
write: &writeData{
|
||||
streamID: wd.streamID,
|
||||
p: wd.p[:allowed],
|
||||
// Even if the original had endStream set, there
|
||||
// are bytes remaining because len(wd.p) > allowed,
|
||||
// so we know endStream is false.
|
||||
endStream: false,
|
||||
},
|
||||
// Our caller is blocking on the final DATA frame, not
|
||||
// this intermediate frame, so no need to wait.
|
||||
done: nil,
|
||||
}
|
||||
rest := FrameWriteRequest{
|
||||
stream: wr.stream,
|
||||
write: &writeData{
|
||||
streamID: wd.streamID,
|
||||
p: wd.p[allowed:],
|
||||
endStream: wd.endStream,
|
||||
},
|
||||
done: wr.done,
|
||||
}
|
||||
return consumed, rest, 2
|
||||
}
|
||||
|
||||
// The frame is consumed whole.
|
||||
// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
|
||||
wr.stream.flow.take(int32(len(wd.p)))
|
||||
return wr, empty, 1
|
||||
}
|
||||
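An editor's sketch of the size arithmetic inside Consume, written standalone since FrameWriteRequest's fields are unexported: the allowed byte count is the minimum of the caller's limit, the stream's flow-control window, and the peer's SETTINGS_MAX_FRAME_SIZE.
package main

import "fmt"

// splitSizes reports how many DATA bytes Consume-style logic would send now
// and how many would remain queued.
func splitSizes(dataLen, n, flowAvail, maxFrameSize int32) (consumed, rest int32) {
	allowed := flowAvail
	if n < allowed {
		allowed = n
	}
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}
	if allowed <= 0 {
		return 0, dataLen
	}
	if dataLen > allowed {
		return allowed, dataLen - allowed
	}
	return dataLen, 0
}

func main() {
	fmt.Println(splitSizes(100000, 65535, 30000, 16384)) // 16384 83616: frame is split
	fmt.Println(splitSizes(1200, 65535, 30000, 16384))   // 1200 0: consumed whole
}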
|
||||
// String is for debugging only.
|
||||
func (wr FrameWriteRequest) String() string {
|
||||
var des string
|
||||
if s, ok := wr.write.(fmt.Stringer); ok {
|
||||
des = s.String()
|
||||
} else {
|
||||
des = fmt.Sprintf("%T", wr.write)
|
||||
}
|
||||
return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
|
||||
}
|
||||
|
||||
// replyToWriter sends err to wr.done and panics if the send must block.
|
||||
// This does nothing if wr.done is nil.
|
||||
func (wr *FrameWriteRequest) replyToWriter(err error) {
|
||||
if wr.done == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case wr.done <- err:
|
||||
default:
|
||||
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
|
||||
}
|
||||
wr.write = nil // prevent use (assume it's tainted after wr.done send)
|
||||
}
|
||||
|
||||
// writeQueue is used by implementations of WriteScheduler.
|
||||
type writeQueue struct {
|
||||
s []FrameWriteRequest
|
||||
}
|
||||
|
||||
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
|
||||
|
||||
func (q *writeQueue) push(wr FrameWriteRequest) {
|
||||
q.s = append(q.s, wr)
|
||||
}
|
||||
|
||||
func (q *writeQueue) shift() FrameWriteRequest {
|
||||
if len(q.s) == 0 {
|
||||
panic("invalid use of queue")
|
||||
}
|
||||
wr := q.s[0]
|
||||
// TODO: less copy-happy queue.
|
||||
copy(q.s, q.s[1:])
|
||||
q.s[len(q.s)-1] = FrameWriteRequest{}
|
||||
q.s = q.s[:len(q.s)-1]
|
||||
return wr
|
||||
}
|
||||
|
||||
// consume consumes up to n bytes from q.s[0]. If the frame is
|
||||
// entirely consumed, it is removed from the queue. If the frame
|
||||
// is partially consumed, the frame is kept with the consumed
|
||||
// bytes removed. Returns true iff any bytes were consumed.
|
||||
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
|
||||
if len(q.s) == 0 {
|
||||
return FrameWriteRequest{}, false
|
||||
}
|
||||
consumed, rest, numresult := q.s[0].Consume(n)
|
||||
switch numresult {
|
||||
case 0:
|
||||
return FrameWriteRequest{}, false
|
||||
case 1:
|
||||
q.shift()
|
||||
case 2:
|
||||
q.s[0] = rest
|
||||
}
|
||||
return consumed, true
|
||||
}
|
||||
|
||||
type writeQueuePool []*writeQueue
|
||||
|
||||
// put inserts an unused writeQueue into the pool.
|
||||
func (p *writeQueuePool) put(q *writeQueue) {
|
||||
for i := range q.s {
|
||||
q.s[i] = FrameWriteRequest{}
|
||||
}
|
||||
q.s = q.s[:0]
|
||||
*p = append(*p, q)
|
||||
}
|
||||
|
||||
// get returns an empty writeQueue.
|
||||
func (p *writeQueuePool) get() *writeQueue {
|
||||
ln := len(*p)
|
||||
if ln == 0 {
|
||||
return new(writeQueue)
|
||||
}
|
||||
x := ln - 1
|
||||
q := (*p)[x]
|
||||
(*p)[x] = nil
|
||||
*p = (*p)[:x]
|
||||
return q
|
||||
}
|
452
vendor/golang.org/x/net/http2/writesched_priority.go
generated
vendored
Normal file
@ -0,0 +1,452 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// RFC 7540, Section 5.3.5: the default weight is 16.
|
||||
const priorityDefaultWeight = 15 // 16 = 15 + 1
|
||||
|
||||
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
|
||||
type PriorityWriteSchedulerConfig struct {
|
||||
// MaxClosedNodesInTree controls the maximum number of closed streams to
|
||||
// retain in the priority tree. Setting this to zero saves a small amount
|
||||
// of memory at the cost of performance.
|
||||
//
|
||||
// See RFC 7540, Section 5.3.4:
|
||||
// "It is possible for a stream to become closed while prioritization
|
||||
// information ... is in transit. ... This potentially creates suboptimal
|
||||
// prioritization, since the stream could be given a priority that is
|
||||
// different from what is intended. To avoid these problems, an endpoint
|
||||
// SHOULD retain stream prioritization state for a period after streams
|
||||
// become closed. The longer state is retained, the lower the chance that
|
||||
// streams are assigned incorrect or default priority values."
|
||||
MaxClosedNodesInTree int
|
||||
|
||||
// MaxIdleNodesInTree controls the maximum number of idle streams to
|
||||
// retain in the priority tree. Setting this to zero saves a small amount
|
||||
// of memory at the cost of performance.
|
||||
//
|
||||
// See RFC 7540, Section 5.3.4:
|
||||
// Similarly, streams that are in the "idle" state can be assigned
|
||||
// priority or become a parent of other streams. This allows for the
|
||||
// creation of a grouping node in the dependency tree, which enables
|
||||
// more flexible expressions of priority. Idle streams begin with a
|
||||
// default priority (Section 5.3.5).
|
||||
MaxIdleNodesInTree int
|
||||
|
||||
// ThrottleOutOfOrderWrites enables write throttling to help ensure that
|
||||
// data is delivered in priority order. This works around a race where
|
||||
// stream B depends on stream A and both streams are about to call Write
|
||||
// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
|
||||
// write as much data from B as possible, but this is suboptimal because A
|
||||
// is a higher-priority stream. With throttling enabled, we write a small
|
||||
// amount of data from B to minimize the amount of bandwidth that B can
|
||||
// steal from A.
|
||||
ThrottleOutOfOrderWrites bool
|
||||
}
|
||||
|
||||
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
|
||||
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
|
||||
// If cfg is nil, default options are used.
|
||||
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
|
||||
if cfg == nil {
|
||||
// For justification of these defaults, see:
|
||||
// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
|
||||
cfg = &PriorityWriteSchedulerConfig{
|
||||
MaxClosedNodesInTree: 10,
|
||||
MaxIdleNodesInTree: 10,
|
||||
ThrottleOutOfOrderWrites: false,
|
||||
}
|
||||
}
|
||||
|
||||
ws := &priorityWriteScheduler{
|
||||
nodes: make(map[uint32]*priorityNode),
|
||||
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
|
||||
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
|
||||
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
|
||||
}
|
||||
ws.nodes[0] = &ws.root
|
||||
if cfg.ThrottleOutOfOrderWrites {
|
||||
ws.writeThrottleLimit = 1024
|
||||
} else {
|
||||
ws.writeThrottleLimit = math.MaxInt32
|
||||
}
|
||||
return ws
|
||||
}
|
||||
|
||||
type priorityNodeState int
|
||||
|
||||
const (
|
||||
priorityNodeOpen priorityNodeState = iota
|
||||
priorityNodeClosed
|
||||
priorityNodeIdle
|
||||
)
|
||||
|
||||
// priorityNode is a node in an HTTP/2 priority tree.
|
||||
// Each node is associated with a single stream ID.
|
||||
// See RFC 7540, Section 5.3.
|
||||
type priorityNode struct {
|
||||
q writeQueue // queue of pending frames to write
|
||||
id uint32 // id of the stream, or 0 for the root of the tree
|
||||
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
|
||||
state priorityNodeState // open | closed | idle
|
||||
bytes int64 // number of bytes written by this node, or 0 if closed
|
||||
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
|
||||
|
||||
// These links form the priority tree.
|
||||
parent *priorityNode
|
||||
kids *priorityNode // start of the kids list
|
||||
prev, next *priorityNode // doubly-linked list of siblings
|
||||
}
|
||||
|
||||
func (n *priorityNode) setParent(parent *priorityNode) {
|
||||
if n == parent {
|
||||
panic("setParent to self")
|
||||
}
|
||||
if n.parent == parent {
|
||||
return
|
||||
}
|
||||
// Unlink from current parent.
|
||||
if parent := n.parent; parent != nil {
|
||||
if n.prev == nil {
|
||||
parent.kids = n.next
|
||||
} else {
|
||||
n.prev.next = n.next
|
||||
}
|
||||
if n.next != nil {
|
||||
n.next.prev = n.prev
|
||||
}
|
||||
}
|
||||
// Link to new parent.
|
||||
// If parent=nil, remove n from the tree.
|
||||
// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
|
||||
n.parent = parent
|
||||
if parent == nil {
|
||||
n.next = nil
|
||||
n.prev = nil
|
||||
} else {
|
||||
n.next = parent.kids
|
||||
n.prev = nil
|
||||
if n.next != nil {
|
||||
n.next.prev = n
|
||||
}
|
||||
parent.kids = n
|
||||
}
|
||||
}
|
||||
|
||||
func (n *priorityNode) addBytes(b int64) {
|
||||
n.bytes += b
|
||||
for ; n != nil; n = n.parent {
|
||||
n.subtreeBytes += b
|
||||
}
|
||||
}
|
||||
|
||||
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
|
||||
// with a non-empty write queue. When f returns true, this function returns true and the
|
||||
// walk halts. tmp is used as scratch space for sorting.
|
||||
//
|
||||
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
|
||||
// if any ancestor p of n is still open (ignoring the root node).
|
||||
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
|
||||
if !n.q.empty() && f(n, openParent) {
|
||||
return true
|
||||
}
|
||||
if n.kids == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Don't consider the root "open" when updating openParent since
|
||||
// we can't send data frames on the root stream (only control frames).
|
||||
if n.id != 0 {
|
||||
openParent = openParent || (n.state == priorityNodeOpen)
|
||||
}
|
||||
|
||||
// Common case: only one kid or all kids have the same weight.
|
||||
// Some clients don't use weights; other clients (like web browsers)
|
||||
// use mostly-linear priority trees.
|
||||
w := n.kids.weight
|
||||
needSort := false
|
||||
for k := n.kids.next; k != nil; k = k.next {
|
||||
if k.weight != w {
|
||||
needSort = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !needSort {
|
||||
for k := n.kids; k != nil; k = k.next {
|
||||
if k.walkReadyInOrder(openParent, tmp, f) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Uncommon case: sort the child nodes. We remove the kids from the parent,
|
||||
// then re-insert after sorting so we can reuse tmp for future sort calls.
|
||||
*tmp = (*tmp)[:0]
|
||||
for n.kids != nil {
|
||||
*tmp = append(*tmp, n.kids)
|
||||
n.kids.setParent(nil)
|
||||
}
|
||||
sort.Sort(sortPriorityNodeSiblings(*tmp))
|
||||
for i := len(*tmp) - 1; i >= 0; i-- {
|
||||
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
|
||||
}
|
||||
for k := n.kids; k != nil; k = k.next {
|
||||
if k.walkReadyInOrder(openParent, tmp, f) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type sortPriorityNodeSiblings []*priorityNode
|
||||
|
||||
func (z sortPriorityNodeSiblings) Len() int { return len(z) }
|
||||
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
|
||||
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
|
||||
// Prefer the subtree that has sent fewer bytes relative to its weight.
|
||||
// See sections 5.3.2 and 5.3.4.
|
||||
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
|
||||
wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
|
||||
if bi == 0 && bk == 0 {
|
||||
return wi >= wk
|
||||
}
|
||||
if bk == 0 {
|
||||
return false
|
||||
}
|
||||
return bi/bk <= wi/wk
|
||||
}
|
||||
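An editor's worked example of the ordering rule in Less above: the subtree that has sent fewer bytes per unit of weight sorts first.
package main

import "fmt"

func less(wi, bi, wk, bk float64) bool {
	if bi == 0 && bk == 0 {
		return wi >= wk
	}
	if bk == 0 {
		return false
	}
	return bi/bk <= wi/wk
}

func main() {
	// Stream A: weight 32, 8000 bytes sent (250 bytes per unit of weight).
	// Stream B: weight 8, 1000 bytes sent (125 bytes per unit of weight).
	fmt.Println(less(32, 8000, 8, 1000)) // false: A does not sort before B
	fmt.Println(less(8, 1000, 32, 8000)) // true:  B sorts before A
}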
|
||||
type priorityWriteScheduler struct {
|
||||
// root is the root of the priority tree, where root.id = 0.
|
||||
// The root queues control frames that are not associated with any stream.
|
||||
root priorityNode
|
||||
|
||||
// nodes maps stream ids to priority tree nodes.
|
||||
nodes map[uint32]*priorityNode
|
||||
|
||||
// maxID is the maximum stream id in nodes.
|
||||
maxID uint32
|
||||
|
||||
// lists of nodes that have been closed or are idle, but are kept in
|
||||
// the tree for improved prioritization. When the lengths exceed either
|
||||
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
|
||||
closedNodes, idleNodes []*priorityNode
|
||||
|
||||
// From the config.
|
||||
maxClosedNodesInTree int
|
||||
maxIdleNodesInTree int
|
||||
writeThrottleLimit int32
|
||||
enableWriteThrottle bool
|
||||
|
||||
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
|
||||
tmp []*priorityNode
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool writeQueuePool
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||
// The stream may already exist as an idle node, but it must not already be open or closed.
|
||||
if curr := ws.nodes[streamID]; curr != nil {
|
||||
if curr.state != priorityNodeIdle {
|
||||
panic(fmt.Sprintf("stream %d already opened", streamID))
|
||||
}
|
||||
curr.state = priorityNodeOpen
|
||||
return
|
||||
}
|
||||
|
||||
// RFC 7540, Section 5.3.5:
|
||||
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
|
||||
// Pushed streams initially depend on their associated stream. In both cases,
|
||||
// streams are assigned a default weight of 16."
|
||||
parent := ws.nodes[options.PusherID]
|
||||
if parent == nil {
|
||||
parent = &ws.root
|
||||
}
|
||||
n := &priorityNode{
|
||||
q: *ws.queuePool.get(),
|
||||
id: streamID,
|
||||
weight: priorityDefaultWeight,
|
||||
state: priorityNodeOpen,
|
||||
}
|
||||
n.setParent(parent)
|
||||
ws.nodes[streamID] = n
|
||||
if streamID > ws.maxID {
|
||||
ws.maxID = streamID
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
|
||||
if streamID == 0 {
|
||||
panic("violation of WriteScheduler interface: cannot close stream 0")
|
||||
}
|
||||
if ws.nodes[streamID] == nil {
|
||||
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
|
||||
}
|
||||
if ws.nodes[streamID].state != priorityNodeOpen {
|
||||
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
|
||||
}
|
||||
|
||||
n := ws.nodes[streamID]
|
||||
n.state = priorityNodeClosed
|
||||
n.addBytes(-n.bytes)
|
||||
|
||||
q := n.q
|
||||
ws.queuePool.put(&q)
|
||||
n.q.s = nil
|
||||
if ws.maxClosedNodesInTree > 0 {
|
||||
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
|
||||
} else {
|
||||
ws.removeNode(n)
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
|
||||
if streamID == 0 {
|
||||
panic("adjustPriority on root")
|
||||
}
|
||||
|
||||
// If streamID does not exist, there are two cases:
|
||||
// - A closed stream that has been removed (this will have ID <= maxID)
|
||||
// - An idle stream that is being used for "grouping" (this will have ID > maxID)
|
||||
n := ws.nodes[streamID]
|
||||
if n == nil {
|
||||
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
|
||||
return
|
||||
}
|
||||
ws.maxID = streamID
|
||||
n = &priorityNode{
|
||||
q: *ws.queuePool.get(),
|
||||
id: streamID,
|
||||
weight: priorityDefaultWeight,
|
||||
state: priorityNodeIdle,
|
||||
}
|
||||
n.setParent(&ws.root)
|
||||
ws.nodes[streamID] = n
|
||||
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
|
||||
}
|
||||
|
||||
// Section 5.3.1: A dependency on a stream that is not currently in the tree
|
||||
// results in that stream being given a default priority (Section 5.3.5).
|
||||
parent := ws.nodes[priority.StreamDep]
|
||||
if parent == nil {
|
||||
n.setParent(&ws.root)
|
||||
n.weight = priorityDefaultWeight
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore if the client tries to make a node its own parent.
|
||||
if n == parent {
|
||||
return
|
||||
}
|
||||
|
||||
// Section 5.3.3:
|
||||
// "If a stream is made dependent on one of its own dependencies, the
|
||||
// formerly dependent stream is first moved to be dependent on the
|
||||
// reprioritized stream's previous parent. The moved dependency retains
|
||||
// its weight."
|
||||
//
|
||||
// That is: if parent depends on n, move parent to depend on n.parent.
|
||||
for x := parent.parent; x != nil; x = x.parent {
|
||||
if x == n {
|
||||
parent.setParent(n.parent)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Section 5.3.3: The exclusive flag causes the stream to become the sole
|
||||
// dependency of its parent stream, causing other dependencies to become
|
||||
// dependent on the exclusive stream.
|
||||
if priority.Exclusive {
|
||||
k := parent.kids
|
||||
for k != nil {
|
||||
next := k.next
|
||||
if k != n {
|
||||
k.setParent(n)
|
||||
}
|
||||
k = next
|
||||
}
|
||||
}
|
||||
|
||||
n.setParent(parent)
|
||||
n.weight = priority.Weight
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
|
||||
var n *priorityNode
|
||||
if id := wr.StreamID(); id == 0 {
|
||||
n = &ws.root
|
||||
} else {
|
||||
n = ws.nodes[id]
|
||||
if n == nil {
|
||||
// id is an idle or closed stream. wr should not be a HEADERS or
|
||||
// DATA frame. However, wr can be a RST_STREAM. In this case, we
|
||||
// push wr onto the root, rather than creating a new priorityNode,
|
||||
// since RST_STREAM is tiny and the stream's priority is unknown
|
||||
// anyway. See issue #17919.
|
||||
if wr.DataSize() > 0 {
|
||||
panic("add DATA on non-open stream")
|
||||
}
|
||||
n = &ws.root
|
||||
}
|
||||
}
|
||||
n.q.push(wr)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
|
||||
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
|
||||
limit := int32(math.MaxInt32)
|
||||
if openParent {
|
||||
limit = ws.writeThrottleLimit
|
||||
}
|
||||
wr, ok = n.q.consume(limit)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
n.addBytes(int64(wr.DataSize()))
|
||||
// If B depends on A and B continuously has data available but A
|
||||
// does not, gradually increase the throttling limit to allow B to
|
||||
// steal more and more bandwidth from A.
|
||||
if openParent {
|
||||
ws.writeThrottleLimit += 1024
|
||||
if ws.writeThrottleLimit < 0 {
|
||||
ws.writeThrottleLimit = math.MaxInt32
|
||||
}
|
||||
} else if ws.enableWriteThrottle {
|
||||
ws.writeThrottleLimit = 1024
|
||||
}
|
||||
return true
|
||||
})
|
||||
return wr, ok
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
|
||||
if maxSize == 0 {
|
||||
return
|
||||
}
|
||||
if len(*list) == maxSize {
|
||||
// Remove the oldest node, then shift left.
|
||||
ws.removeNode((*list)[0])
|
||||
x := (*list)[1:]
|
||||
copy(*list, x)
|
||||
*list = (*list)[:len(x)]
|
||||
}
|
||||
*list = append(*list, n)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
|
||||
for k := n.kids; k != nil; k = k.next {
|
||||
k.setParent(n.parent)
|
||||
}
|
||||
n.setParent(nil)
|
||||
delete(ws.nodes, n.id)
|
||||
}
|
77
vendor/golang.org/x/net/http2/writesched_random.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import "math"
|
||||
|
||||
// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
|
||||
// priorities. Control frames like SETTINGS and PING are written before DATA
|
||||
// frames, but if no control frames are queued and multiple streams have queued
|
||||
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
|
||||
func NewRandomWriteScheduler() WriteScheduler {
|
||||
return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
|
||||
}
|
||||
|
||||
type randomWriteScheduler struct {
|
||||
// zero queues frames not associated with a specific stream.
|
||||
zero writeQueue
|
||||
|
||||
// sq contains the stream-specific queues, keyed by stream ID.
|
||||
// When a stream is idle, closed, or emptied, it's deleted
|
||||
// from the map.
|
||||
sq map[uint32]*writeQueue
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool writeQueuePool
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||
// no-op: idle streams are not tracked
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
|
||||
q, ok := ws.sq[streamID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(ws.sq, streamID)
|
||||
ws.queuePool.put(q)
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
|
||||
// no-op: priorities are ignored
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
|
||||
id := wr.StreamID()
|
||||
if id == 0 {
|
||||
ws.zero.push(wr)
|
||||
return
|
||||
}
|
||||
q, ok := ws.sq[id]
|
||||
if !ok {
|
||||
q = ws.queuePool.get()
|
||||
ws.sq[id] = q
|
||||
}
|
||||
q.push(wr)
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
|
||||
// Control frames first.
|
||||
if !ws.zero.empty() {
|
||||
return ws.zero.shift(), true
|
||||
}
|
||||
// Iterate over all non-idle streams until finding one that can be consumed.
|
||||
for streamID, q := range ws.sq {
|
||||
if wr, ok := q.consume(math.MaxInt32); ok {
|
||||
if q.empty() {
|
||||
delete(ws.sq, streamID)
|
||||
ws.queuePool.put(q)
|
||||
}
|
||||
return wr, true
|
||||
}
|
||||
}
|
||||
return FrameWriteRequest{}, false
|
||||
}
|
59
vendor/golang.org/x/net/icmp/dstunreach.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package icmp

import (
	"golang.org/x/net/internal/iana"
	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
)

// A DstUnreach represents an ICMP destination unreachable message
// body.
type DstUnreach struct {
	Data       []byte      // data, known as original datagram field
	Extensions []Extension // extensions
}

// Len implements the Len method of MessageBody interface.
func (p *DstUnreach) Len(proto int) int {
	if p == nil {
		return 0
	}
	l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions)
	return l
}

// Marshal implements the Marshal method of MessageBody interface.
func (p *DstUnreach) Marshal(proto int) ([]byte, error) {
	var typ Type
	switch proto {
	case iana.ProtocolICMP:
		typ = ipv4.ICMPTypeDestinationUnreachable
	case iana.ProtocolIPv6ICMP:
		typ = ipv6.ICMPTypeDestinationUnreachable
	default:
		return nil, errInvalidProtocol
	}
	if !validExtensions(typ, p.Extensions) {
		return nil, errInvalidExtension
	}
	return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions)
}

// parseDstUnreach parses b as an ICMP destination unreachable message
// body.
func parseDstUnreach(proto int, typ Type, b []byte) (MessageBody, error) {
	if len(b) < 4 {
		return nil, errMessageTooShort
	}
	p := &DstUnreach{}
	var err error
	p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b)
	if err != nil {
		return nil, err
	}
	return p, nil
}
173
vendor/golang.org/x/net/icmp/echo.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package icmp
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"golang.org/x/net/internal/iana"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// An Echo represents an ICMP echo request or reply message body.
|
||||
type Echo struct {
|
||||
ID int // identifier
|
||||
Seq int // sequence number
|
||||
Data []byte // data
|
||||
}
|
||||
|
||||
// Len implements the Len method of MessageBody interface.
|
||||
func (p *Echo) Len(proto int) int {
|
||||
if p == nil {
|
||||
return 0
|
||||
}
|
||||
return 4 + len(p.Data)
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of MessageBody interface.
|
||||
func (p *Echo) Marshal(proto int) ([]byte, error) {
|
||||
b := make([]byte, 4+len(p.Data))
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(p.ID))
|
||||
binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq))
|
||||
copy(b[4:], p.Data)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// parseEcho parses b as an ICMP echo request or reply message body.
|
||||
func parseEcho(proto int, _ Type, b []byte) (MessageBody, error) {
|
||||
bodyLen := len(b)
|
||||
if bodyLen < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))}
|
||||
if bodyLen > 4 {
|
||||
p.Data = make([]byte, bodyLen-4)
|
||||
copy(p.Data, b[4:])
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// An ExtendedEchoRequest represents an ICMP extended echo request
|
||||
// message body.
|
||||
type ExtendedEchoRequest struct {
|
||||
ID int // identifier
|
||||
Seq int // sequence number
|
||||
Local bool // must be true when identifying by name or index
|
||||
Extensions []Extension // extensions
|
||||
}
|
||||
|
||||
// Len implements the Len method of MessageBody interface.
|
||||
func (p *ExtendedEchoRequest) Len(proto int) int {
|
||||
if p == nil {
|
||||
return 0
|
||||
}
|
||||
l, _ := multipartMessageBodyDataLen(proto, false, nil, p.Extensions)
|
||||
return l
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of MessageBody interface.
|
||||
func (p *ExtendedEchoRequest) Marshal(proto int) ([]byte, error) {
|
||||
var typ Type
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
typ = ipv4.ICMPTypeExtendedEchoRequest
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
typ = ipv6.ICMPTypeExtendedEchoRequest
|
||||
default:
|
||||
return nil, errInvalidProtocol
|
||||
}
|
||||
if !validExtensions(typ, p.Extensions) {
|
||||
return nil, errInvalidExtension
|
||||
}
|
||||
b, err := marshalMultipartMessageBody(proto, false, nil, p.Extensions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(p.ID))
|
||||
b[2] = byte(p.Seq)
|
||||
if p.Local {
|
||||
b[3] |= 0x01
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// parseExtendedEchoRequest parses b as an ICMP extended echo request
|
||||
// message body.
|
||||
func parseExtendedEchoRequest(proto int, typ Type, b []byte) (MessageBody, error) {
|
||||
if len(b) < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
p := &ExtendedEchoRequest{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(b[2])}
|
||||
if b[3]&0x01 != 0 {
|
||||
p.Local = true
|
||||
}
|
||||
var err error
|
||||
_, p.Extensions, err = parseMultipartMessageBody(proto, typ, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// An ExtendedEchoReply represents an ICMP extended echo reply message
|
||||
// body.
|
||||
type ExtendedEchoReply struct {
|
||||
ID int // identifier
|
||||
Seq int // sequence number
|
||||
State int // 3-bit state working together with Message.Code
|
||||
Active bool // probed interface is active
|
||||
IPv4 bool // probed interface runs IPv4
|
||||
IPv6 bool // probed interface runs IPv6
|
||||
}
|
||||
|
||||
// Len implements the Len method of MessageBody interface.
|
||||
func (p *ExtendedEchoReply) Len(proto int) int {
|
||||
if p == nil {
|
||||
return 0
|
||||
}
|
||||
return 4
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of MessageBody interface.
|
||||
func (p *ExtendedEchoReply) Marshal(proto int) ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(p.ID))
|
||||
b[2] = byte(p.Seq)
|
||||
b[3] = byte(p.State<<5) & 0xe0
|
||||
if p.Active {
|
||||
b[3] |= 0x04
|
||||
}
|
||||
if p.IPv4 {
|
||||
b[3] |= 0x02
|
||||
}
|
||||
if p.IPv6 {
|
||||
b[3] |= 0x01
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// parseExtendedEchoReply parses b as an ICMP extended echo reply
|
||||
// message body.
|
||||
func parseExtendedEchoReply(proto int, _ Type, b []byte) (MessageBody, error) {
|
||||
if len(b) < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
p := &ExtendedEchoReply{
|
||||
ID: int(binary.BigEndian.Uint16(b[:2])),
|
||||
Seq: int(b[2]),
|
||||
State: int(b[3]) >> 5,
|
||||
}
|
||||
if b[3]&0x04 != 0 {
|
||||
p.Active = true
|
||||
}
|
||||
if b[3]&0x02 != 0 {
|
||||
p.IPv4 = true
|
||||
}
|
||||
if b[3]&0x01 != 0 {
|
||||
p.IPv6 = true
|
||||
}
|
||||
return p, nil
|
||||
}
|
113
vendor/golang.org/x/net/icmp/endpoint.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package icmp
|
||||
|
||||
import (
|
||||
"net"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
var _ net.PacketConn = &PacketConn{}
|
||||
|
||||
// A PacketConn represents a packet network endpoint that uses either
|
||||
// ICMPv4 or ICMPv6.
|
||||
type PacketConn struct {
|
||||
c net.PacketConn
|
||||
p4 *ipv4.PacketConn
|
||||
p6 *ipv6.PacketConn
|
||||
}
|
||||
|
||||
func (c *PacketConn) ok() bool { return c != nil && c.c != nil }
|
||||
|
||||
// IPv4PacketConn returns the ipv4.PacketConn of c.
|
||||
// It returns nil when c is not created as the endpoint for ICMPv4.
|
||||
func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn {
|
||||
if !c.ok() {
|
||||
return nil
|
||||
}
|
||||
return c.p4
|
||||
}
|
||||
|
||||
// IPv6PacketConn returns the ipv6.PacketConn of c.
|
||||
// It returns nil when c is not created as the endpoint for ICMPv6.
|
||||
func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn {
|
||||
if !c.ok() {
|
||||
return nil
|
||||
}
|
||||
return c.p6
|
||||
}
|
||||
|
||||
// ReadFrom reads an ICMP message from the connection.
|
||||
func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) {
|
||||
if !c.ok() {
|
||||
return 0, nil, errInvalidConn
|
||||
}
|
||||
// Please be informed that ipv4.NewPacketConn enables
|
||||
// IP_STRIPHDR option by default on Darwin.
|
||||
// See golang.org/issue/9395 for further information.
|
||||
if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && c.p4 != nil {
|
||||
n, _, peer, err := c.p4.ReadFrom(b)
|
||||
return n, peer, err
|
||||
}
|
||||
return c.c.ReadFrom(b)
|
||||
}
|
||||
|
||||
// WriteTo writes the ICMP message b to dst.
|
||||
// The provided dst must be net.UDPAddr when c is a non-privileged
|
||||
// datagram-oriented ICMP endpoint.
|
||||
// Otherwise it must be net.IPAddr.
|
||||
func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) {
|
||||
if !c.ok() {
|
||||
return 0, errInvalidConn
|
||||
}
|
||||
return c.c.WriteTo(b, dst)
|
||||
}
|
||||
|
||||
// Close closes the endpoint.
|
||||
func (c *PacketConn) Close() error {
|
||||
if !c.ok() {
|
||||
return errInvalidConn
|
||||
}
|
||||
return c.c.Close()
|
||||
}
|
||||
|
||||
// LocalAddr returns the local network address.
|
||||
func (c *PacketConn) LocalAddr() net.Addr {
|
||||
if !c.ok() {
|
||||
return nil
|
||||
}
|
||||
return c.c.LocalAddr()
|
||||
}
|
||||
|
||||
// SetDeadline sets the read and write deadlines associated with the
|
||||
// endpoint.
|
||||
func (c *PacketConn) SetDeadline(t time.Time) error {
|
||||
if !c.ok() {
|
||||
return errInvalidConn
|
||||
}
|
||||
return c.c.SetDeadline(t)
|
||||
}
|
||||
|
||||
// SetReadDeadline sets the read deadline associated with the
|
||||
// endpoint.
|
||||
func (c *PacketConn) SetReadDeadline(t time.Time) error {
|
||||
if !c.ok() {
|
||||
return errInvalidConn
|
||||
}
|
||||
return c.c.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
// SetWriteDeadline sets the write deadline associated with the
|
||||
// endpoint.
|
||||
func (c *PacketConn) SetWriteDeadline(t time.Time) error {
|
||||
if !c.ok() {
|
||||
return errInvalidConn
|
||||
}
|
||||
return c.c.SetWriteDeadline(t)
|
||||
}
|
170
vendor/golang.org/x/net/icmp/extension.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package icmp
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// An Extension represents an ICMP extension.
|
||||
type Extension interface {
|
||||
// Len returns the length of ICMP extension.
|
||||
// The provided proto must be either the ICMPv4 or ICMPv6
|
||||
// protocol number.
|
||||
Len(proto int) int
|
||||
|
||||
// Marshal returns the binary encoding of ICMP extension.
|
||||
// The provided proto must be either the ICMPv4 or ICMPv6
|
||||
// protocol number.
|
||||
Marshal(proto int) ([]byte, error)
|
||||
}
|
||||
|
||||
const extensionVersion = 2
|
||||
|
||||
func validExtensionHeader(b []byte) bool {
|
||||
v := int(b[0]&0xf0) >> 4
|
||||
s := binary.BigEndian.Uint16(b[2:4])
|
||||
if s != 0 {
|
||||
s = checksum(b)
|
||||
}
|
||||
if v != extensionVersion || s != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// parseExtensions parses b as a list of ICMP extensions.
|
||||
// The length attribute l must be the length attribute field in
|
||||
// received icmp messages.
|
||||
//
|
||||
// It will return a list of ICMP extensions and an adjusted length
|
||||
// attribute that represents the length of the padded original
|
||||
// datagram field. Otherwise, it returns an error.
|
||||
func parseExtensions(typ Type, b []byte, l int) ([]Extension, int, error) {
|
||||
// Still a lot of non-RFC 4884 compliant implementations are
|
||||
// out there. Set the length attribute l to 128 when it looks
|
||||
// inappropriate for backwards compatibility.
|
||||
//
|
||||
// A minimal extension at least requires 8 octets; 4 octets
|
||||
// for an extension header, and 4 octets for a single object
|
||||
// header.
|
||||
//
|
||||
// See RFC 4884 for further information.
|
||||
switch typ {
|
||||
case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest:
|
||||
if len(b) < 8 || !validExtensionHeader(b) {
|
||||
return nil, -1, errNoExtension
|
||||
}
|
||||
l = 0
|
||||
default:
|
||||
if 128 > l || l+8 > len(b) {
|
||||
l = 128
|
||||
}
|
||||
if l+8 > len(b) {
|
||||
return nil, -1, errNoExtension
|
||||
}
|
||||
if !validExtensionHeader(b[l:]) {
|
||||
if l == 128 {
|
||||
return nil, -1, errNoExtension
|
||||
}
|
||||
l = 128
|
||||
if !validExtensionHeader(b[l:]) {
|
||||
return nil, -1, errNoExtension
|
||||
}
|
||||
}
|
||||
}
|
||||
var exts []Extension
|
||||
for b = b[l+4:]; len(b) >= 4; {
|
||||
ol := int(binary.BigEndian.Uint16(b[:2]))
|
||||
if 4 > ol || ol > len(b) {
|
||||
break
|
||||
}
|
||||
switch b[2] {
|
||||
case classMPLSLabelStack:
|
||||
ext, err := parseMPLSLabelStack(b[:ol])
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
exts = append(exts, ext)
|
||||
case classInterfaceInfo:
|
||||
ext, err := parseInterfaceInfo(b[:ol])
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
exts = append(exts, ext)
|
||||
case classInterfaceIdent:
|
||||
ext, err := parseInterfaceIdent(b[:ol])
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
exts = append(exts, ext)
|
||||
default:
|
||||
ext := &RawExtension{Data: make([]byte, ol)}
|
||||
copy(ext.Data, b[:ol])
|
||||
exts = append(exts, ext)
|
||||
}
|
||||
b = b[ol:]
|
||||
}
|
||||
return exts, l, nil
|
||||
}
|
||||
|
||||
func validExtensions(typ Type, exts []Extension) bool {
|
||||
switch typ {
|
||||
case ipv4.ICMPTypeDestinationUnreachable, ipv4.ICMPTypeTimeExceeded, ipv4.ICMPTypeParameterProblem,
|
||||
ipv6.ICMPTypeDestinationUnreachable, ipv6.ICMPTypeTimeExceeded:
|
||||
for i := range exts {
|
||||
switch exts[i].(type) {
|
||||
case *MPLSLabelStack, *InterfaceInfo, *RawExtension:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest:
|
||||
var n int
|
||||
for i := range exts {
|
||||
switch exts[i].(type) {
|
||||
case *InterfaceIdent:
|
||||
n++
|
||||
case *RawExtension:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
// Not a single InterfaceIdent object or a combo of
|
||||
// RawExtension and InterfaceIdent objects is not
|
||||
// allowed.
|
||||
if n == 1 && len(exts) > 1 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// A RawExtension represents a raw extension.
|
||||
//
|
||||
// A raw extension is excluded from message processing and can be used
|
||||
// to construct applications such as protocol conformance testing.
|
||||
type RawExtension struct {
|
||||
Data []byte // data
|
||||
}
|
||||
|
||||
// Len implements the Len method of Extension interface.
|
||||
func (p *RawExtension) Len(proto int) int {
|
||||
if p == nil {
|
||||
return 0
|
||||
}
|
||||
return len(p.Data)
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of Extension interface.
|
||||
func (p *RawExtension) Marshal(proto int) ([]byte, error) {
|
||||
return p.Data, nil
|
||||
}
|
75
vendor/golang.org/x/net/icmp/helper_posix.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
|
||||
|
||||
package icmp
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func sockaddr(family int, address string) (syscall.Sockaddr, error) {
|
||||
switch family {
|
||||
case syscall.AF_INET:
|
||||
a, err := net.ResolveIPAddr("ip4", address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(a.IP) == 0 {
|
||||
a.IP = net.IPv4zero
|
||||
}
|
||||
if a.IP = a.IP.To4(); a.IP == nil {
|
||||
return nil, net.InvalidAddrError("non-ipv4 address")
|
||||
}
|
||||
sa := &syscall.SockaddrInet4{}
|
||||
copy(sa.Addr[:], a.IP)
|
||||
return sa, nil
|
||||
case syscall.AF_INET6:
|
||||
a, err := net.ResolveIPAddr("ip6", address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(a.IP) == 0 {
|
||||
a.IP = net.IPv6unspecified
|
||||
}
|
||||
if a.IP.Equal(net.IPv4zero) {
|
||||
a.IP = net.IPv6unspecified
|
||||
}
|
||||
if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil {
|
||||
return nil, net.InvalidAddrError("non-ipv6 address")
|
||||
}
|
||||
sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)}
|
||||
copy(sa.Addr[:], a.IP)
|
||||
return sa, nil
|
||||
default:
|
||||
return nil, net.InvalidAddrError("unexpected family")
|
||||
}
|
||||
}
|
||||
|
||||
func zoneToUint32(zone string) uint32 {
|
||||
if zone == "" {
|
||||
return 0
|
||||
}
|
||||
if ifi, err := net.InterfaceByName(zone); err == nil {
|
||||
return uint32(ifi.Index)
|
||||
}
|
||||
n, err := strconv.Atoi(zone)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return uint32(n)
|
||||
}
|
||||
|
||||
func last(s string, b byte) int {
|
||||
i := len(s)
|
||||
for i--; i >= 0; i-- {
|
||||
if s[i] == b {
|
||||
break
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
322
vendor/golang.org/x/net/icmp/interface.go
generated
vendored
Normal file
@ -0,0 +1,322 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package icmp
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/internal/iana"
|
||||
)
|
||||
|
||||
const (
|
||||
classInterfaceInfo = 2
|
||||
)
|
||||
|
||||
const (
|
||||
attrMTU = 1 << iota
|
||||
attrName
|
||||
attrIPAddr
|
||||
attrIfIndex
|
||||
)
|
||||
|
||||
// An InterfaceInfo represents interface and next-hop identification.
|
||||
type InterfaceInfo struct {
|
||||
Class int // extension object class number
|
||||
Type int // extension object sub-type
|
||||
Interface *net.Interface
|
||||
Addr *net.IPAddr
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) nameLen() int {
|
||||
if len(ifi.Interface.Name) > 63 {
|
||||
return 64
|
||||
}
|
||||
l := 1 + len(ifi.Interface.Name)
|
||||
return (l + 3) &^ 3
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) {
|
||||
l = 4
|
||||
if ifi.Interface != nil && ifi.Interface.Index > 0 {
|
||||
attrs |= attrIfIndex
|
||||
l += 4
|
||||
if len(ifi.Interface.Name) > 0 {
|
||||
attrs |= attrName
|
||||
l += ifi.nameLen()
|
||||
}
|
||||
if ifi.Interface.MTU > 0 {
|
||||
attrs |= attrMTU
|
||||
l += 4
|
||||
}
|
||||
}
|
||||
if ifi.Addr != nil {
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
if ifi.Addr.IP.To4() != nil {
|
||||
attrs |= attrIPAddr
|
||||
l += 4 + net.IPv4len
|
||||
}
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {
|
||||
attrs |= attrIPAddr
|
||||
l += 4 + net.IPv6len
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Len implements the Len method of Extension interface.
|
||||
func (ifi *InterfaceInfo) Len(proto int) int {
|
||||
_, l := ifi.attrsAndLen(proto)
|
||||
return l
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of Extension interface.
|
||||
func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) {
|
||||
attrs, l := ifi.attrsAndLen(proto)
|
||||
b := make([]byte, l)
|
||||
if err := ifi.marshal(proto, b, attrs, l); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error {
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(l))
|
||||
b[2], b[3] = classInterfaceInfo, byte(ifi.Type)
|
||||
for b = b[4:]; len(b) > 0 && attrs != 0; {
|
||||
switch {
|
||||
case attrs&attrIfIndex != 0:
|
||||
b = ifi.marshalIfIndex(proto, b)
|
||||
attrs &^= attrIfIndex
|
||||
case attrs&attrIPAddr != 0:
|
||||
b = ifi.marshalIPAddr(proto, b)
|
||||
attrs &^= attrIPAddr
|
||||
case attrs&attrName != 0:
|
||||
b = ifi.marshalName(proto, b)
|
||||
attrs &^= attrName
|
||||
case attrs&attrMTU != 0:
|
||||
b = ifi.marshalMTU(proto, b)
|
||||
attrs &^= attrMTU
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte {
|
||||
binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index))
|
||||
return b[4:]
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) {
|
||||
if len(b) < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4]))
|
||||
return b[4:], nil
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte {
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv4))
|
||||
copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4())
|
||||
b = b[4+net.IPv4len:]
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv6))
|
||||
copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16())
|
||||
b = b[4+net.IPv6len:]
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) {
|
||||
if len(b) < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
afi := int(binary.BigEndian.Uint16(b[:2]))
|
||||
b = b[4:]
|
||||
switch afi {
|
||||
case iana.AddrFamilyIPv4:
|
||||
if len(b) < net.IPv4len {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
ifi.Addr.IP = make(net.IP, net.IPv4len)
|
||||
copy(ifi.Addr.IP, b[:net.IPv4len])
|
||||
b = b[net.IPv4len:]
|
||||
case iana.AddrFamilyIPv6:
|
||||
if len(b) < net.IPv6len {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
ifi.Addr.IP = make(net.IP, net.IPv6len)
|
||||
copy(ifi.Addr.IP, b[:net.IPv6len])
|
||||
b = b[net.IPv6len:]
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte {
|
||||
l := byte(ifi.nameLen())
|
||||
b[0] = l
|
||||
copy(b[1:], []byte(ifi.Interface.Name))
|
||||
return b[l:]
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) {
|
||||
if 4 > len(b) || len(b) < int(b[0]) {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
l := int(b[0])
|
||||
if l%4 != 0 || 4 > l || l > 64 {
|
||||
return nil, errInvalidExtension
|
||||
}
|
||||
var name [63]byte
|
||||
copy(name[:], b[1:l])
|
||||
ifi.Interface.Name = strings.Trim(string(name[:]), "\000")
|
||||
return b[l:], nil
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte {
|
||||
binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU))
|
||||
return b[4:]
|
||||
}
|
||||
|
||||
func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) {
|
||||
if len(b) < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4]))
|
||||
return b[4:], nil
|
||||
}
|
||||
|
||||
func parseInterfaceInfo(b []byte) (Extension, error) {
|
||||
ifi := &InterfaceInfo{
|
||||
Class: int(b[2]),
|
||||
Type: int(b[3]),
|
||||
}
|
||||
if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 {
|
||||
ifi.Interface = &net.Interface{}
|
||||
}
|
||||
if ifi.Type&attrIPAddr != 0 {
|
||||
ifi.Addr = &net.IPAddr{}
|
||||
}
|
||||
attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU)
|
||||
for b = b[4:]; len(b) > 0 && attrs != 0; {
|
||||
var err error
|
||||
switch {
|
||||
case attrs&attrIfIndex != 0:
|
||||
b, err = ifi.parseIfIndex(b)
|
||||
attrs &^= attrIfIndex
|
||||
case attrs&attrIPAddr != 0:
|
||||
b, err = ifi.parseIPAddr(b)
|
||||
attrs &^= attrIPAddr
|
||||
case attrs&attrName != 0:
|
||||
b, err = ifi.parseName(b)
|
||||
attrs &^= attrName
|
||||
case attrs&attrMTU != 0:
|
||||
b, err = ifi.parseMTU(b)
|
||||
attrs &^= attrMTU
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {
|
||||
ifi.Addr.Zone = ifi.Interface.Name
|
||||
}
|
||||
return ifi, nil
|
||||
}
|
||||
|
||||
const (
|
||||
classInterfaceIdent = 3
|
||||
typeInterfaceByName = 1
|
||||
typeInterfaceByIndex = 2
|
||||
typeInterfaceByAddress = 3
|
||||
)
|
||||
|
||||
// An InterfaceIdent represents interface identification.
|
||||
type InterfaceIdent struct {
|
||||
Class int // extension object class number
|
||||
Type int // extension object sub-type
|
||||
Name string // interface name
|
||||
Index int // interface index
|
||||
AFI int // address family identifier; see address family numbers in IANA registry
|
||||
Addr []byte // address
|
||||
}
|
||||
|
||||
// Len implements the Len method of Extension interface.
|
||||
func (ifi *InterfaceIdent) Len(_ int) int {
|
||||
switch ifi.Type {
|
||||
case typeInterfaceByName:
|
||||
l := len(ifi.Name)
|
||||
if l > 255 {
|
||||
l = 255
|
||||
}
|
||||
return 4 + (l+3)&^3
|
||||
case typeInterfaceByIndex:
|
||||
return 4 + 4
|
||||
case typeInterfaceByAddress:
|
||||
return 4 + 4 + (len(ifi.Addr)+3)&^3
|
||||
default:
|
||||
return 4
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of Extension interface.
|
||||
func (ifi *InterfaceIdent) Marshal(proto int) ([]byte, error) {
|
||||
b := make([]byte, ifi.Len(proto))
|
||||
if err := ifi.marshal(proto, b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (ifi *InterfaceIdent) marshal(proto int, b []byte) error {
|
||||
l := ifi.Len(proto)
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(l))
|
||||
b[2], b[3] = classInterfaceIdent, byte(ifi.Type)
|
||||
switch ifi.Type {
|
||||
case typeInterfaceByName:
|
||||
copy(b[4:], ifi.Name)
|
||||
case typeInterfaceByIndex:
|
||||
binary.BigEndian.PutUint32(b[4:4+4], uint32(ifi.Index))
|
||||
case typeInterfaceByAddress:
|
||||
binary.BigEndian.PutUint16(b[4:4+2], uint16(ifi.AFI))
|
||||
b[4+2] = byte(len(ifi.Addr))
|
||||
copy(b[4+4:], ifi.Addr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseInterfaceIdent(b []byte) (Extension, error) {
|
||||
ifi := &InterfaceIdent{
|
||||
Class: int(b[2]),
|
||||
Type: int(b[3]),
|
||||
}
|
||||
switch ifi.Type {
|
||||
case typeInterfaceByName:
|
||||
ifi.Name = strings.Trim(string(b[4:]), "\x00")
|
||||
case typeInterfaceByIndex:
|
||||
if len(b[4:]) < 4 {
|
||||
return nil, errInvalidExtension
|
||||
}
|
||||
ifi.Index = int(binary.BigEndian.Uint32(b[4 : 4+4]))
|
||||
case typeInterfaceByAddress:
|
||||
if len(b[4:]) < 4 {
|
||||
return nil, errInvalidExtension
|
||||
}
|
||||
ifi.AFI = int(binary.BigEndian.Uint16(b[4 : 4+2]))
|
||||
l := int(b[4+2])
|
||||
if len(b[4+4:]) < l {
|
||||
return nil, errInvalidExtension
|
||||
}
|
||||
ifi.Addr = make([]byte, l)
|
||||
copy(ifi.Addr, b[4+4:])
|
||||
}
|
||||
return ifi, nil
|
||||
}
|
69
vendor/golang.org/x/net/icmp/ipv4.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package icmp

import (
	"encoding/binary"
	"net"
	"runtime"

	"golang.org/x/net/internal/socket"
	"golang.org/x/net/ipv4"
)

// freebsdVersion is set in sys_freebsd.go.
// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.
var freebsdVersion uint32

// ParseIPv4Header returns the IPv4 header of the IPv4 packet that
// triggered an ICMP error message.
// This is found in the Data field of the ICMP error message body.
//
// The provided b must be in the format used by a raw ICMP socket on
// the local system.
// This may differ from the wire format, and the format used by a raw
// IP socket, depending on the system.
//
// To parse an IPv6 header, use ipv6.ParseHeader.
func ParseIPv4Header(b []byte) (*ipv4.Header, error) {
	if len(b) < ipv4.HeaderLen {
		return nil, errHeaderTooShort
	}
	hdrlen := int(b[0]&0x0f) << 2
	if hdrlen > len(b) {
		return nil, errBufferTooShort
	}
	h := &ipv4.Header{
		Version:  int(b[0] >> 4),
		Len:      hdrlen,
		TOS:      int(b[1]),
		ID:       int(binary.BigEndian.Uint16(b[4:6])),
		FragOff:  int(binary.BigEndian.Uint16(b[6:8])),
		TTL:      int(b[8]),
		Protocol: int(b[9]),
		Checksum: int(binary.BigEndian.Uint16(b[10:12])),
		Src:      net.IPv4(b[12], b[13], b[14], b[15]),
		Dst:      net.IPv4(b[16], b[17], b[18], b[19]),
	}
	switch runtime.GOOS {
	case "darwin", "ios":
		h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4]))
	case "freebsd":
		if freebsdVersion >= 1000000 {
			h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
		} else {
			h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4]))
		}
	default:
		h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
	}
	h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13
	h.FragOff = h.FragOff & 0x1fff
	if hdrlen-ipv4.HeaderLen > 0 {
		h.Options = make([]byte, hdrlen-ipv4.HeaderLen)
		copy(h.Options, b[ipv4.HeaderLen:])
	}
	return h, nil
}
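Example (not part of the vendored sources): a minimal sketch of ParseIPv4Header on a hand-built 20-byte IPv4 header, assuming the common layout in which the total-length field is big-endian (not the Darwin raw-socket quirk handled above); the addresses come from the documentation ranges and stand in for the original datagram carried in an ICMP error body:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/icmp"
)

func main() {
	// Hand-built IPv4 header: version 4, IHL 5, total length 20, TTL 64,
	// protocol 17 (UDP), 192.0.2.1 -> 198.51.100.7. Purely synthetic data.
	data := []byte{
		0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
		0x40, 0x11, 0x00, 0x00, 192, 0, 2, 1,
		198, 51, 100, 7,
	}
	h, err := icmp.ParseIPv4Header(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("original packet: %s -> %s, protocol %d, ttl %d\n",
		h.Src, h.Dst, h.Protocol, h.TTL)
}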
23
vendor/golang.org/x/net/icmp/ipv6.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package icmp

import (
	"net"

	"golang.org/x/net/internal/iana"
)

const ipv6PseudoHeaderLen = 2*net.IPv6len + 8

// IPv6PseudoHeader returns an IPv6 pseudo header for checksum
// calculation.
func IPv6PseudoHeader(src, dst net.IP) []byte {
	b := make([]byte, ipv6PseudoHeaderLen)
	copy(b, src.To16())
	copy(b[net.IPv6len:], dst.To16())
	b[len(b)-1] = byte(iana.ProtocolIPv6ICMP)
	return b
}
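Example (not part of the vendored sources): a sketch of how the pseudo header is typically combined with Message.Marshal so the ICMPv6 checksum is filled in by the caller rather than left to the kernel; the addresses, ID, and payload are placeholders:

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv6"
)

func main() {
	// Placeholder link-local addresses; in a real program these would be
	// the source and destination of the outgoing ICMPv6 packet.
	src, dst := net.ParseIP("fe80::1"), net.ParseIP("fe80::2")

	m := icmp.Message{
		Type: ipv6.ICMPTypeEchoRequest,
		Body: &icmp.Echo{ID: 1, Seq: 1, Data: []byte("HELLO")},
	}
	// With a non-nil pseudo header, Marshal computes the ICMPv6 checksum
	// itself; with nil, the checksum field is left for the kernel.
	wb, err := m.Marshal(icmp.IPv6PseudoHeader(src, dst))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", wb[:8])
}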
103
vendor/golang.org/x/net/icmp/listen_posix.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows

package icmp

import (
	"net"
	"os"
	"runtime"
	"syscall"

	"golang.org/x/net/internal/iana"
	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
)

const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option

// ListenPacket listens for incoming ICMP packets addressed to
// address. See net.Dial for the syntax of address.
//
// For non-privileged datagram-oriented ICMP endpoints, network must
// be "udp4" or "udp6". The endpoint allows to read, write a few
// limited ICMP messages such as echo request and echo reply.
// Currently only Darwin and Linux support this.
//
// Examples:
//	ListenPacket("udp4", "192.168.0.1")
//	ListenPacket("udp4", "0.0.0.0")
//	ListenPacket("udp6", "fe80::1%en0")
//	ListenPacket("udp6", "::")
//
// For privileged raw ICMP endpoints, network must be "ip4" or "ip6"
// followed by a colon and an ICMP protocol number or name.
//
// Examples:
//	ListenPacket("ip4:icmp", "192.168.0.1")
//	ListenPacket("ip4:1", "0.0.0.0")
//	ListenPacket("ip6:ipv6-icmp", "fe80::1%en0")
//	ListenPacket("ip6:58", "::")
func ListenPacket(network, address string) (*PacketConn, error) {
	var family, proto int
	switch network {
	case "udp4":
		family, proto = syscall.AF_INET, iana.ProtocolICMP
	case "udp6":
		family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP
	default:
		i := last(network, ':')
		if i < 0 {
			i = len(network)
		}
		switch network[:i] {
		case "ip4":
			proto = iana.ProtocolICMP
		case "ip6":
			proto = iana.ProtocolIPv6ICMP
		}
	}
	var cerr error
	var c net.PacketConn
	switch family {
	case syscall.AF_INET, syscall.AF_INET6:
		s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto)
		if err != nil {
			return nil, os.NewSyscallError("socket", err)
		}
		if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && family == syscall.AF_INET {
			if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil {
				syscall.Close(s)
				return nil, os.NewSyscallError("setsockopt", err)
			}
		}
		sa, err := sockaddr(family, address)
		if err != nil {
			syscall.Close(s)
			return nil, err
		}
		if err := syscall.Bind(s, sa); err != nil {
			syscall.Close(s)
			return nil, os.NewSyscallError("bind", err)
		}
		f := os.NewFile(uintptr(s), "datagram-oriented icmp")
		c, cerr = net.FilePacketConn(f)
		f.Close()
	default:
		c, cerr = net.ListenPacket(network, address)
	}
	if cerr != nil {
		return nil, cerr
	}
	switch proto {
	case iana.ProtocolICMP:
		return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil
	case iana.ProtocolIPv6ICMP:
		return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil
	default:
		return &PacketConn{c: c}, nil
	}
}
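Example (not part of the vendored sources): the usual non-privileged ping pattern built on ListenPacket, assuming Linux or Darwin with unprivileged ICMP sockets enabled (on Linux, the net.ipv4.ping_group_range sysctl); the destination address and timeout are placeholders:

package main

import (
	"log"
	"net"
	"os"
	"time"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	c, err := icmp.ListenPacket("udp4", "0.0.0.0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	m := icmp.Message{
		Type: ipv4.ICMPTypeEcho,
		Body: &icmp.Echo{ID: os.Getpid() & 0xffff, Seq: 1, Data: []byte("PING")},
	}
	wb, err := m.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}
	// With a "udp4" endpoint the destination must be a *net.UDPAddr.
	// 192.0.2.1 is a documentation placeholder; substitute a reachable host.
	if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("192.0.2.1")}); err != nil {
		log.Fatal(err)
	}

	rb := make([]byte, 1500)
	if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {
		log.Fatal(err)
	}
	n, peer, err := c.ReadFrom(rb)
	if err != nil {
		log.Fatal(err)
	}
	rm, err := icmp.ParseMessage(1, rb[:n]) // 1 = ICMPv4 protocol number
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %v from %v", rm.Type, peer)
}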
33
vendor/golang.org/x/net/icmp/listen_stub.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows

package icmp

// ListenPacket listens for incoming ICMP packets addressed to
// address. See net.Dial for the syntax of address.
//
// For non-privileged datagram-oriented ICMP endpoints, network must
// be "udp4" or "udp6". The endpoint allows to read, write a few
// limited ICMP messages such as echo request and echo reply.
// Currently only Darwin and Linux support this.
//
// Examples:
//	ListenPacket("udp4", "192.168.0.1")
//	ListenPacket("udp4", "0.0.0.0")
//	ListenPacket("udp6", "fe80::1%en0")
//	ListenPacket("udp6", "::")
//
// For privileged raw ICMP endpoints, network must be "ip4" or "ip6"
// followed by a colon and an ICMP protocol number or name.
//
// Examples:
//	ListenPacket("ip4:icmp", "192.168.0.1")
//	ListenPacket("ip4:1", "0.0.0.0")
//	ListenPacket("ip6:ipv6-icmp", "fe80::1%en0")
//	ListenPacket("ip6:58", "::")
func ListenPacket(network, address string) (*PacketConn, error) {
	return nil, errNotImplemented
}
162
vendor/golang.org/x/net/icmp/message.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package icmp provides basic functions for the manipulation of
|
||||
// messages used in the Internet Control Message Protocols,
|
||||
// ICMPv4 and ICMPv6.
|
||||
//
|
||||
// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443.
|
||||
// Multi-part message support for ICMP is defined in RFC 4884.
|
||||
// ICMP extensions for MPLS are defined in RFC 4950.
|
||||
// ICMP extensions for interface and next-hop identification are
|
||||
// defined in RFC 5837.
|
||||
// PROBE: A utility for probing interfaces is defined in RFC 8335.
|
||||
package icmp // import "golang.org/x/net/icmp"
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"net"
|
||||
"runtime"
|
||||
|
||||
"golang.org/x/net/internal/iana"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9.
|
||||
|
||||
var (
|
||||
errInvalidConn = errors.New("invalid connection")
|
||||
errInvalidProtocol = errors.New("invalid protocol")
|
||||
errMessageTooShort = errors.New("message too short")
|
||||
errHeaderTooShort = errors.New("header too short")
|
||||
errBufferTooShort = errors.New("buffer too short")
|
||||
errInvalidBody = errors.New("invalid body")
|
||||
errNoExtension = errors.New("no extension")
|
||||
errInvalidExtension = errors.New("invalid extension")
|
||||
errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH)
|
||||
)
|
||||
|
||||
func checksum(b []byte) uint16 {
|
||||
csumcv := len(b) - 1 // checksum coverage
|
||||
s := uint32(0)
|
||||
for i := 0; i < csumcv; i += 2 {
|
||||
s += uint32(b[i+1])<<8 | uint32(b[i])
|
||||
}
|
||||
if csumcv&1 == 0 {
|
||||
s += uint32(b[csumcv])
|
||||
}
|
||||
s = s>>16 + s&0xffff
|
||||
s = s + s>>16
|
||||
return ^uint16(s)
|
||||
}
|
||||
|
||||
// A Type represents an ICMP message type.
|
||||
type Type interface {
|
||||
Protocol() int
|
||||
}
|
||||
|
||||
// A Message represents an ICMP message.
|
||||
type Message struct {
|
||||
Type Type // type, either ipv4.ICMPType or ipv6.ICMPType
|
||||
Code int // code
|
||||
Checksum int // checksum
|
||||
Body MessageBody // body
|
||||
}
|
||||
|
||||
// Marshal returns the binary encoding of the ICMP message m.
|
||||
//
|
||||
// For an ICMPv4 message, the returned message always contains the
|
||||
// calculated checksum field.
|
||||
//
|
||||
// For an ICMPv6 message, the returned message contains the calculated
|
||||
// checksum field when psh is not nil, otherwise the kernel will
|
||||
// compute the checksum field during the message transmission.
|
||||
// When psh is not nil, it must be the pseudo header for IPv6.
|
||||
func (m *Message) Marshal(psh []byte) ([]byte, error) {
|
||||
var mtype byte
|
||||
switch typ := m.Type.(type) {
|
||||
case ipv4.ICMPType:
|
||||
mtype = byte(typ)
|
||||
case ipv6.ICMPType:
|
||||
mtype = byte(typ)
|
||||
default:
|
||||
return nil, errInvalidProtocol
|
||||
}
|
||||
b := []byte{mtype, byte(m.Code), 0, 0}
|
||||
proto := m.Type.Protocol()
|
||||
if proto == iana.ProtocolIPv6ICMP && psh != nil {
|
||||
b = append(psh, b...)
|
||||
}
|
||||
if m.Body != nil && m.Body.Len(proto) != 0 {
|
||||
mb, err := m.Body.Marshal(proto)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = append(b, mb...)
|
||||
}
|
||||
if proto == iana.ProtocolIPv6ICMP {
|
||||
if psh == nil { // cannot calculate checksum here
|
||||
return b, nil
|
||||
}
|
||||
off, l := 2*net.IPv6len, len(b)-len(psh)
|
||||
binary.BigEndian.PutUint32(b[off:off+4], uint32(l))
|
||||
}
|
||||
s := checksum(b)
|
||||
// Place checksum back in header; using ^= avoids the
|
||||
// assumption the checksum bytes are zero.
|
||||
b[len(psh)+2] ^= byte(s)
|
||||
b[len(psh)+3] ^= byte(s >> 8)
|
||||
return b[len(psh):], nil
|
||||
}
|
||||
|
||||
var parseFns = map[Type]func(int, Type, []byte) (MessageBody, error){
|
||||
ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach,
|
||||
ipv4.ICMPTypeTimeExceeded: parseTimeExceeded,
|
||||
ipv4.ICMPTypeParameterProblem: parseParamProb,
|
||||
|
||||
ipv4.ICMPTypeEcho: parseEcho,
|
||||
ipv4.ICMPTypeEchoReply: parseEcho,
|
||||
ipv4.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest,
|
||||
ipv4.ICMPTypeExtendedEchoReply: parseExtendedEchoReply,
|
||||
|
||||
ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach,
|
||||
ipv6.ICMPTypePacketTooBig: parsePacketTooBig,
|
||||
ipv6.ICMPTypeTimeExceeded: parseTimeExceeded,
|
||||
ipv6.ICMPTypeParameterProblem: parseParamProb,
|
||||
|
||||
ipv6.ICMPTypeEchoRequest: parseEcho,
|
||||
ipv6.ICMPTypeEchoReply: parseEcho,
|
||||
ipv6.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest,
|
||||
ipv6.ICMPTypeExtendedEchoReply: parseExtendedEchoReply,
|
||||
}
|
||||
|
||||
// ParseMessage parses b as an ICMP message.
|
||||
// The provided proto must be either the ICMPv4 or ICMPv6 protocol
|
||||
// number.
|
||||
func ParseMessage(proto int, b []byte) (*Message, error) {
|
||||
if len(b) < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
var err error
|
||||
m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))}
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
m.Type = ipv4.ICMPType(b[0])
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
m.Type = ipv6.ICMPType(b[0])
|
||||
default:
|
||||
return nil, errInvalidProtocol
|
||||
}
|
||||
if fn, ok := parseFns[m.Type]; !ok {
|
||||
m.Body, err = parseRawBody(proto, b[4:])
|
||||
} else {
|
||||
m.Body, err = fn(proto, m.Type, b[4:])
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
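Example (not part of the vendored sources): a minimal round trip through the API above; Marshal fills in the ICMPv4 checksum and ParseMessage decodes the same bytes. The ID, sequence number, and payload are arbitrary:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	m := icmp.Message{
		Type: ipv4.ICMPTypeEcho, // ICMPv4 echo request
		Code: 0,
		Body: &icmp.Echo{ID: 0x1234, Seq: 1, Data: []byte("HELLO-R-U-THERE")},
	}
	// For ICMPv4 the pseudo header argument is nil; Marshal computes the
	// checksum over the message itself.
	wb, err := m.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}

	// 1 is the ICMPv4 protocol number (iana.ProtocolICMP).
	rm, err := icmp.ParseMessage(1, wb)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("type=%v checksum=%#04x body=%+v\n", rm.Type, rm.Checksum, rm.Body)
}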
52
vendor/golang.org/x/net/icmp/messagebody.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package icmp

// A MessageBody represents an ICMP message body.
type MessageBody interface {
	// Len returns the length of ICMP message body.
	// The provided proto must be either the ICMPv4 or ICMPv6
	// protocol number.
	Len(proto int) int

	// Marshal returns the binary encoding of ICMP message body.
	// The provided proto must be either the ICMPv4 or ICMPv6
	// protocol number.
	Marshal(proto int) ([]byte, error)
}

// A RawBody represents a raw message body.
//
// A raw message body is excluded from message processing and can be
// used to construct applications such as protocol conformance
// testing.
type RawBody struct {
	Data []byte // data
}

// Len implements the Len method of MessageBody interface.
func (p *RawBody) Len(proto int) int {
	if p == nil {
		return 0
	}
	return len(p.Data)
}

// Marshal implements the Marshal method of MessageBody interface.
func (p *RawBody) Marshal(proto int) ([]byte, error) {
	return p.Data, nil
}

// parseRawBody parses b as an ICMP message body.
func parseRawBody(proto int, b []byte) (MessageBody, error) {
	p := &RawBody{Data: make([]byte, len(b))}
	copy(p.Data, b)
	return p, nil
}

// A DefaultMessageBody represents the default message body.
//
// Deprecated: Use RawBody instead.
type DefaultMessageBody = RawBody
77
vendor/golang.org/x/net/icmp/mpls.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package icmp
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// MPLSLabel represents an MPLS label stack entry.
|
||||
type MPLSLabel struct {
|
||||
Label int // label value
|
||||
TC int // traffic class; formerly experimental use
|
||||
S bool // bottom of stack
|
||||
TTL int // time to live
|
||||
}
|
||||
|
||||
const (
|
||||
classMPLSLabelStack = 1
|
||||
typeIncomingMPLSLabelStack = 1
|
||||
)
|
||||
|
||||
// MPLSLabelStack represents an MPLS label stack.
|
||||
type MPLSLabelStack struct {
|
||||
Class int // extension object class number
|
||||
Type int // extension object sub-type
|
||||
Labels []MPLSLabel
|
||||
}
|
||||
|
||||
// Len implements the Len method of Extension interface.
|
||||
func (ls *MPLSLabelStack) Len(proto int) int {
|
||||
return 4 + (4 * len(ls.Labels))
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of Extension interface.
|
||||
func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) {
|
||||
b := make([]byte, ls.Len(proto))
|
||||
if err := ls.marshal(proto, b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (ls *MPLSLabelStack) marshal(proto int, b []byte) error {
|
||||
l := ls.Len(proto)
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(l))
|
||||
b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack
|
||||
off := 4
|
||||
for _, ll := range ls.Labels {
|
||||
b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0)
|
||||
b[off+2] |= byte(ll.TC << 1 & 0x0e)
|
||||
if ll.S {
|
||||
b[off+2] |= 0x1
|
||||
}
|
||||
b[off+3] = byte(ll.TTL)
|
||||
off += 4
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseMPLSLabelStack(b []byte) (Extension, error) {
|
||||
ls := &MPLSLabelStack{
|
||||
Class: int(b[2]),
|
||||
Type: int(b[3]),
|
||||
}
|
||||
for b = b[4:]; len(b) >= 4; b = b[4:] {
|
||||
ll := MPLSLabel{
|
||||
Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4,
|
||||
TC: int(b[2]&0x0e) >> 1,
|
||||
TTL: int(b[3]),
|
||||
}
|
||||
if b[2]&0x1 != 0 {
|
||||
ll.S = true
|
||||
}
|
||||
ls.Labels = append(ls.Labels, ll)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
129
vendor/golang.org/x/net/icmp/multipart.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package icmp
|
||||
|
||||
import "golang.org/x/net/internal/iana"
|
||||
|
||||
// multipartMessageBodyDataLen takes b as an original datagram and
|
||||
// exts as extensions, and returns a required length for message body
|
||||
// and a required length for a padded original datagram in wire
|
||||
// format.
|
||||
func multipartMessageBodyDataLen(proto int, withOrigDgram bool, b []byte, exts []Extension) (bodyLen, dataLen int) {
|
||||
bodyLen = 4 // length of leading octets
|
||||
var extLen int
|
||||
var rawExt bool // raw extension may contain an empty object
|
||||
for _, ext := range exts {
|
||||
extLen += ext.Len(proto)
|
||||
if _, ok := ext.(*RawExtension); ok {
|
||||
rawExt = true
|
||||
}
|
||||
}
|
||||
if extLen > 0 && withOrigDgram {
|
||||
dataLen = multipartMessageOrigDatagramLen(proto, b)
|
||||
} else {
|
||||
dataLen = len(b)
|
||||
}
|
||||
if extLen > 0 || rawExt {
|
||||
bodyLen += 4 // length of extension header
|
||||
}
|
||||
bodyLen += dataLen + extLen
|
||||
return bodyLen, dataLen
|
||||
}
|
||||
|
||||
// multipartMessageOrigDatagramLen takes b as an original datagram,
|
||||
// and returns a required length for a padded original datagram in wire
|
||||
// format.
|
||||
func multipartMessageOrigDatagramLen(proto int, b []byte) int {
|
||||
roundup := func(b []byte, align int) int {
|
||||
// According to RFC 4884, the padded original datagram
|
||||
// field must contain at least 128 octets.
|
||||
if len(b) < 128 {
|
||||
return 128
|
||||
}
|
||||
r := len(b)
|
||||
return (r + align - 1) &^ (align - 1)
|
||||
}
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
return roundup(b, 4)
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
return roundup(b, 8)
|
||||
default:
|
||||
return len(b)
|
||||
}
|
||||
}
|
||||
|
||||
// marshalMultipartMessageBody takes data as an original datagram and
|
||||
// exts as extensions, and returns a binary encoding of message body.
|
||||
// It can be used for non-multipart message bodies when exts is nil.
|
||||
func marshalMultipartMessageBody(proto int, withOrigDgram bool, data []byte, exts []Extension) ([]byte, error) {
|
||||
bodyLen, dataLen := multipartMessageBodyDataLen(proto, withOrigDgram, data, exts)
|
||||
b := make([]byte, bodyLen)
|
||||
copy(b[4:], data)
|
||||
if len(exts) > 0 {
|
||||
b[4+dataLen] = byte(extensionVersion << 4)
|
||||
off := 4 + dataLen + 4 // leading octets, data, extension header
|
||||
for _, ext := range exts {
|
||||
switch ext := ext.(type) {
|
||||
case *MPLSLabelStack:
|
||||
if err := ext.marshal(proto, b[off:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
off += ext.Len(proto)
|
||||
case *InterfaceInfo:
|
||||
attrs, l := ext.attrsAndLen(proto)
|
||||
if err := ext.marshal(proto, b[off:], attrs, l); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
off += ext.Len(proto)
|
||||
case *InterfaceIdent:
|
||||
if err := ext.marshal(proto, b[off:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
off += ext.Len(proto)
|
||||
case *RawExtension:
|
||||
copy(b[off:], ext.Data)
|
||||
off += ext.Len(proto)
|
||||
}
|
||||
}
|
||||
s := checksum(b[4+dataLen:])
|
||||
b[4+dataLen+2] ^= byte(s)
|
||||
b[4+dataLen+3] ^= byte(s >> 8)
|
||||
if withOrigDgram {
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
b[1] = byte(dataLen / 4)
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
b[0] = byte(dataLen / 8)
|
||||
}
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// parseMultipartMessageBody parses b as either a non-multipart
|
||||
// message body or a multipart message body.
|
||||
func parseMultipartMessageBody(proto int, typ Type, b []byte) ([]byte, []Extension, error) {
|
||||
var l int
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
l = 4 * int(b[1])
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
l = 8 * int(b[0])
|
||||
}
|
||||
if len(b) == 4 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
exts, l, err := parseExtensions(typ, b[4:], l)
|
||||
if err != nil {
|
||||
l = len(b) - 4
|
||||
}
|
||||
var data []byte
|
||||
if l > 0 {
|
||||
data = make([]byte, l)
|
||||
copy(data, b[4:])
|
||||
}
|
||||
return data, exts, nil
|
||||
}
|
43
vendor/golang.org/x/net/icmp/packettoobig.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package icmp

import "encoding/binary"

// A PacketTooBig represents an ICMP packet too big message body.
type PacketTooBig struct {
	MTU  int    // maximum transmission unit of the nexthop link
	Data []byte // data, known as original datagram field
}

// Len implements the Len method of MessageBody interface.
func (p *PacketTooBig) Len(proto int) int {
	if p == nil {
		return 0
	}
	return 4 + len(p.Data)
}

// Marshal implements the Marshal method of MessageBody interface.
func (p *PacketTooBig) Marshal(proto int) ([]byte, error) {
	b := make([]byte, 4+len(p.Data))
	binary.BigEndian.PutUint32(b[:4], uint32(p.MTU))
	copy(b[4:], p.Data)
	return b, nil
}

// parsePacketTooBig parses b as an ICMP packet too big message body.
func parsePacketTooBig(proto int, _ Type, b []byte) (MessageBody, error) {
	bodyLen := len(b)
	if bodyLen < 4 {
		return nil, errMessageTooShort
	}
	p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))}
	if bodyLen > 4 {
		p.Data = make([]byte, bodyLen-4)
		copy(p.Data, b[4:])
	}
	return p, nil
}
72
vendor/golang.org/x/net/icmp/paramprob.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package icmp
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"golang.org/x/net/internal/iana"
|
||||
"golang.org/x/net/ipv4"
|
||||
)
|
||||
|
||||
// A ParamProb represents an ICMP parameter problem message body.
|
||||
type ParamProb struct {
|
||||
Pointer uintptr // offset within the data where the error was detected
|
||||
Data []byte // data, known as original datagram field
|
||||
Extensions []Extension // extensions
|
||||
}
|
||||
|
||||
// Len implements the Len method of MessageBody interface.
|
||||
func (p *ParamProb) Len(proto int) int {
|
||||
if p == nil {
|
||||
return 0
|
||||
}
|
||||
l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions)
|
||||
return l
|
||||
}
|
||||
|
||||
// Marshal implements the Marshal method of MessageBody interface.
|
||||
func (p *ParamProb) Marshal(proto int) ([]byte, error) {
|
||||
switch proto {
|
||||
case iana.ProtocolICMP:
|
||||
if !validExtensions(ipv4.ICMPTypeParameterProblem, p.Extensions) {
|
||||
return nil, errInvalidExtension
|
||||
}
|
||||
b, err := marshalMultipartMessageBody(proto, true, p.Data, p.Extensions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b[0] = byte(p.Pointer)
|
||||
return b, nil
|
||||
case iana.ProtocolIPv6ICMP:
|
||||
b := make([]byte, p.Len(proto))
|
||||
binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer))
|
||||
copy(b[4:], p.Data)
|
||||
return b, nil
|
||||
default:
|
||||
return nil, errInvalidProtocol
|
||||
}
|
||||
}
|
||||
|
||||
// parseParamProb parses b as an ICMP parameter problem message body.
|
||||
func parseParamProb(proto int, typ Type, b []byte) (MessageBody, error) {
|
||||
if len(b) < 4 {
|
||||
return nil, errMessageTooShort
|
||||
}
|
||||
p := &ParamProb{}
|
||||
if proto == iana.ProtocolIPv6ICMP {
|
||||
p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4]))
|
||||
p.Data = make([]byte, len(b)-4)
|
||||
copy(p.Data, b[4:])
|
||||
return p, nil
|
||||
}
|
||||
p.Pointer = uintptr(b[0])
|
||||
var err error
|
||||
p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
11
vendor/golang.org/x/net/icmp/sys_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package icmp

import "syscall"

func init() {
	freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate")
}
57
vendor/golang.org/x/net/icmp/timeexceeded.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package icmp

import (
	"golang.org/x/net/internal/iana"
	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
)

// A TimeExceeded represents an ICMP time exceeded message body.
type TimeExceeded struct {
	Data       []byte      // data, known as original datagram field
	Extensions []Extension // extensions
}

// Len implements the Len method of MessageBody interface.
func (p *TimeExceeded) Len(proto int) int {
	if p == nil {
		return 0
	}
	l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions)
	return l
}

// Marshal implements the Marshal method of MessageBody interface.
func (p *TimeExceeded) Marshal(proto int) ([]byte, error) {
	var typ Type
	switch proto {
	case iana.ProtocolICMP:
		typ = ipv4.ICMPTypeTimeExceeded
	case iana.ProtocolIPv6ICMP:
		typ = ipv6.ICMPTypeTimeExceeded
	default:
		return nil, errInvalidProtocol
	}
	if !validExtensions(typ, p.Extensions) {
		return nil, errInvalidExtension
	}
	return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions)
}

// parseTimeExceeded parses b as an ICMP time exceeded message body.
func parseTimeExceeded(proto int, typ Type, b []byte) (MessageBody, error) {
	if len(b) < 4 {
		return nil, errMessageTooShort
	}
	p := &TimeExceeded{}
	var err error
	p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b)
	if err != nil {
		return nil, err
	}
	return p, nil
}
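The time exceeded body is what a traceroute-style probe sees when a TTL expires. Below is a round-trip sketch (again not part of the commit) using only this package's exported API; the reply bytes are synthesized in-process rather than read from a raw socket so the example is self-contained.

// Sketch only: recognizing an IPv4 "time exceeded" reply.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	// Assumed input: in practice replyBytes would be the ICMP payload read
	// from a raw IPv4 socket; here it is built locally.
	te := icmp.Message{
		Type: ipv4.ICMPTypeTimeExceeded,
		Body: &icmp.TimeExceeded{Data: []byte("original datagram bytes")},
	}
	replyBytes, err := te.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}

	m, err := icmp.ParseMessage(1, replyBytes) // 1 == iana.ProtocolICMP
	if err != nil {
		log.Fatal(err)
	}
	if m.Type == ipv4.ICMPTypeTimeExceeded {
		body := m.Body.(*icmp.TimeExceeded)
		fmt.Printf("original datagram: %q\n", body.Data)
	}
}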
734
vendor/golang.org/x/net/idna/idna10.0.0.go
generated
vendored
Normal file
@ -0,0 +1,734 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/bidi"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as lookup.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||
func ToASCII(s string) (string, error) {
|
||||
return Punycode.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
return Punycode.process(s, false)
|
||||
}
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||
// compatibility. It is used by most browsers when resolving domain names. This
|
||||
// option is only meaningful if combined with MapForLookup.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = true }
|
||||
}
|
||||
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||
//
|
||||
// This is the behavior suggested by UTS #46 and is adopted by some
|
||||
// browsers.
|
||||
func RemoveLeadingDots(remove bool) Option {
|
||||
return func(o *options) { o.removeLeadingDots = remove }
|
||||
}
|
||||
|
||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||
func ValidateLabels(enable bool) Option {
|
||||
return func(o *options) {
|
||||
// Don't override existing mappings, but set one that at least checks
|
||||
// normalization if it is not set.
|
||||
if o.mapping == nil && enable {
|
||||
o.mapping = normalize
|
||||
}
|
||||
o.trie = trie
|
||||
o.validateLabels = enable
|
||||
o.fromPuny = validateFromPunycode
|
||||
}
|
||||
}
|
||||
|
||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
|
||||
//
|
||||
// This option is useful, for instance, for browsers that allow characters
|
||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||
// http://www.rfc-editor.org/std/std3.txt for more details. This option
|
||||
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
|
||||
func StrictDomainName(use bool) Option {
|
||||
return func(o *options) {
|
||||
o.trie = trie
|
||||
o.useSTD3Rules = use
|
||||
o.fromPuny = validateFromPunycode
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: the following options pull in tables. The tables should not be linked
|
||||
// in as long as the options are not used.
|
||||
|
||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||
// that relies on proper validation of labels should include this rule.
|
||||
func BidiRule() Option {
|
||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||
}
|
||||
|
||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||
func ValidateForRegistration() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateRegistration
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
VerifyDNSLength(true)(o)
|
||||
BidiRule()(o)
|
||||
}
|
||||
}
|
||||
|
||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||
// transformed for domain name lookup according to the requirements set out in
|
||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||
// to add this check.
|
||||
//
|
||||
// The mappings include normalization and mapping case, width and other
|
||||
// compatibility mappings.
|
||||
func MapForLookup() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateAndMap
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
transitional bool
|
||||
useSTD3Rules bool
|
||||
validateLabels bool
|
||||
verifyDNSLength bool
|
||||
removeLeadingDots bool
|
||||
|
||||
trie *idnaTrie
|
||||
|
||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||
fromPuny func(p *Profile, s string) error
|
||||
|
||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||
mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
|
||||
|
||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||
// defined in RFC 5893.
|
||||
bidirule func(s string) bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of an IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
//
|
||||
// With no options, the returned Profile is the most permissive and equals the
|
||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||
// for lookup and registration purposes respectively, which can be tailored by
|
||||
// adding more fine-grained options, where later options override earlier
|
||||
// options.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.useSTD3Rules {
|
||||
s += ":UseSTD3Rules"
|
||||
}
|
||||
if p.validateLabels {
|
||||
s += ":ValidateLabels"
|
||||
}
|
||||
if p.verifyDNSLength {
|
||||
s += ":VerifyDNSLength"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||
// of validation.
|
||||
Punycode *Profile = punycode
|
||||
|
||||
// Lookup is the recommended profile for looking up domain names, according
|
||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||
// change over time.
|
||||
Lookup *Profile = lookup
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display *Profile = display
|
||||
|
||||
// Registration is the recommended profile for checking whether a given
|
||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||
Registration *Profile = registration
|
||||
|
||||
punycode = &Profile{}
|
||||
lookup = &Profile{options{
|
||||
transitional: true,
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
display = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
registration = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
verifyDNSLength: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateRegistration,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
|
||||
// TODO: profiles
|
||||
// Register: recommended for approving domain names: don't do any mappings
|
||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||
)
|
||||
|
||||
type labelError struct{ label, code_ string }
|
||||
|
||||
func (e labelError) code() string { return e.code_ }
|
||||
func (e labelError) Error() string {
|
||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||
}
|
||||
|
||||
type runeError rune
|
||||
|
||||
func (e runeError) code() string { return "P1" }
|
||||
func (e runeError) Error() string {
|
||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||
}
|
||||
|
||||
// process implements the algorithm described in section 4 of UTS #46,
|
||||
// see https://www.unicode.org/reports/tr46.
|
||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||
var err error
|
||||
var isBidi bool
|
||||
if p.mapping != nil {
|
||||
s, isBidi, err = p.mapping(p, s)
|
||||
}
|
||||
// Remove leading empty labels.
|
||||
if p.removeLeadingDots {
|
||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||
}
|
||||
}
|
||||
// TODO: allow for a quick check of the tables data.
|
||||
// It seems like we should only create this error on ToASCII, but the
|
||||
// UTS 46 conformance tests suggest we should always check this.
|
||||
if err == nil && p.verifyDNSLength && s == "" {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
labels := labelIter{orig: s}
|
||||
for ; !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if label == "" {
|
||||
// Empty labels are not okay. The label iterator skips the last
|
||||
// label if it is empty.
|
||||
if err == nil && p.verifyDNSLength {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(label, acePrefix) {
|
||||
u, err2 := decode(label[len(acePrefix):])
|
||||
if err2 != nil {
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
// Spec says keep the old label.
|
||||
continue
|
||||
}
|
||||
isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
|
||||
labels.set(u)
|
||||
if err == nil && p.validateLabels {
|
||||
err = p.fromPuny(p, u)
|
||||
}
|
||||
if err == nil {
|
||||
// This should be called on NonTransitional, according to the
|
||||
// spec, but that currently does not have any effect. Use the
|
||||
// original profile to preserve options.
|
||||
err = p.validateLabel(u)
|
||||
}
|
||||
} else if err == nil {
|
||||
err = p.validateLabel(label)
|
||||
}
|
||||
}
|
||||
if isBidi && p.bidirule != nil && err == nil {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
if !p.bidirule(labels.label()) {
|
||||
err = &labelError{s, "B"}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if toASCII {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if !ascii(label) {
|
||||
a, err2 := encode(acePrefix, label)
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
label = a
|
||||
labels.set(a)
|
||||
}
|
||||
n := len(label)
|
||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||
err = &labelError{label, "A4"}
|
||||
}
|
||||
}
|
||||
}
|
||||
s = labels.result()
|
||||
if toASCII && p.verifyDNSLength && err == nil {
|
||||
// Compute the length of the domain name minus the root label and its dot.
|
||||
n := len(s)
|
||||
if n > 0 && s[n-1] == '.' {
|
||||
n--
|
||||
}
|
||||
if len(s) < 1 || n > 253 {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
|
||||
// TODO: consider first doing a quick check to see if any of these checks
|
||||
// need to be done. This will make it slower in the general case, but
|
||||
// faster in the common case.
|
||||
mapped = norm.NFC.String(s)
|
||||
isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
|
||||
return mapped, isBidi, nil
|
||||
}
|
||||
|
||||
func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
|
||||
// TODO: filter need for normalization in loop below.
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return s, false, &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
return s, bidi, runeError(utf8.RuneError)
|
||||
}
|
||||
bidi = bidi || info(v).isBidi(s[i:])
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
||||
// for strict conformance to IDNA2008.
|
||||
case valid, deviation:
|
||||
case disallowed, mapped, unknown, ignored:
|
||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||
return s, bidi, runeError(r)
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return s, bidi, nil
|
||||
}
|
||||
|
||||
func (c info) isBidi(s string) bool {
|
||||
if !c.isMapped() {
|
||||
return c&attributesMask == rtl
|
||||
}
|
||||
// TODO: also store bidi info for mapped data. This is possible, but a bit
|
||||
// cumbersome and not for the common case.
|
||||
p, _ := bidi.LookupString(s)
|
||||
switch p.Class() {
|
||||
case bidi.R, bidi.AL, bidi.AN:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
|
||||
var (
|
||||
b []byte
|
||||
k int
|
||||
)
|
||||
// combinedInfoBits contains the or-ed bits of all runes. We use this
|
||||
// to derive the mayNeedNorm bit later. This may trigger normalization
|
||||
// overeagerly, but it will not do so in the common case. The end result
|
||||
// is another 10% saving on BenchmarkProfile for the common case.
|
||||
var combinedInfoBits info
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
b = append(b, s[k:i]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
k = len(s)
|
||||
if err == nil {
|
||||
err = runeError(utf8.RuneError)
|
||||
}
|
||||
break
|
||||
}
|
||||
combinedInfoBits |= info(v)
|
||||
bidi = bidi || info(v).isBidi(s[i:])
|
||||
start := i
|
||||
i += sz
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
case valid:
|
||||
continue
|
||||
case disallowed:
|
||||
if err == nil {
|
||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
||||
err = runeError(r)
|
||||
}
|
||||
continue
|
||||
case mapped, deviation:
|
||||
b = append(b, s[k:start]...)
|
||||
b = info(v).appendMapping(b, s[start:i])
|
||||
case ignored:
|
||||
b = append(b, s[k:start]...)
|
||||
// drop the rune
|
||||
case unknown:
|
||||
b = append(b, s[k:start]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
}
|
||||
k = i
|
||||
}
|
||||
if k == 0 {
|
||||
// No changes so far.
|
||||
if combinedInfoBits&mayNeedNorm != 0 {
|
||||
s = norm.NFC.String(s)
|
||||
}
|
||||
} else {
|
||||
b = append(b, s[k:]...)
|
||||
if norm.NFC.QuickSpan(b) != len(b) {
|
||||
b = norm.NFC.Bytes(b)
|
||||
}
|
||||
// TODO: the punycode converters require strings as input.
|
||||
s = string(b)
|
||||
}
|
||||
return s, bidi, err
|
||||
}
|
||||
|
||||
// A labelIter allows iterating over domain name labels.
|
||||
type labelIter struct {
|
||||
orig string
|
||||
slice []string
|
||||
curStart int
|
||||
curEnd int
|
||||
i int
|
||||
}
|
||||
|
||||
func (l *labelIter) reset() {
|
||||
l.curStart = 0
|
||||
l.curEnd = 0
|
||||
l.i = 0
|
||||
}
|
||||
|
||||
func (l *labelIter) done() bool {
|
||||
return l.curStart >= len(l.orig)
|
||||
}
|
||||
|
||||
func (l *labelIter) result() string {
|
||||
if l.slice != nil {
|
||||
return strings.Join(l.slice, ".")
|
||||
}
|
||||
return l.orig
|
||||
}
|
||||
|
||||
func (l *labelIter) label() string {
|
||||
if l.slice != nil {
|
||||
return l.slice[l.i]
|
||||
}
|
||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||
l.curEnd = l.curStart + p
|
||||
if p == -1 {
|
||||
l.curEnd = len(l.orig)
|
||||
}
|
||||
return l.orig[l.curStart:l.curEnd]
|
||||
}
|
||||
|
||||
// next sets the value to the next label. It skips the last label if it is empty.
|
||||
func (l *labelIter) next() {
|
||||
l.i++
|
||||
if l.slice != nil {
|
||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
} else {
|
||||
l.curStart = l.curEnd + 1
|
||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *labelIter) set(s string) {
|
||||
if l.slice == nil {
|
||||
l.slice = strings.Split(l.orig, ".")
|
||||
}
|
||||
l.slice[l.i] = s
|
||||
}
|
||||
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
|
||||
func (p *Profile) simplify(cat category) category {
|
||||
switch cat {
|
||||
case disallowedSTD3Mapped:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = mapped
|
||||
}
|
||||
case disallowedSTD3Valid:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = valid
|
||||
}
|
||||
case deviation:
|
||||
if !p.transitional {
|
||||
cat = valid
|
||||
}
|
||||
case validNV8, validXV8:
|
||||
// TODO: handle V2008
|
||||
cat = valid
|
||||
}
|
||||
return cat
|
||||
}
|
||||
|
||||
func validateFromPunycode(p *Profile, s string) error {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return &labelError{s, "V1"}
|
||||
}
|
||||
// TODO: detect whether string may have to be normalized in the following
|
||||
// loop.
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
return runeError(utf8.RuneError)
|
||||
}
|
||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||
return &labelError{s, "V6"}
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
zwnj = "\u200c"
|
||||
zwj = "\u200d"
|
||||
)
|
||||
|
||||
type joinState int8
|
||||
|
||||
const (
|
||||
stateStart joinState = iota
|
||||
stateVirama
|
||||
stateBefore
|
||||
stateBeforeVirama
|
||||
stateAfter
|
||||
stateFAIL
|
||||
)
|
||||
|
||||
var joinStates = [][numJoinTypes]joinState{
|
||||
stateStart: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateVirama,
|
||||
},
|
||||
stateVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
},
|
||||
stateBefore: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
joinZWNJ: stateAfter,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateBeforeVirama,
|
||||
},
|
||||
stateBeforeVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
},
|
||||
stateAfter: {
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateAfter,
|
||||
joiningR: stateStart,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||
},
|
||||
stateFAIL: {
|
||||
0: stateFAIL,
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateFAIL,
|
||||
joiningT: stateFAIL,
|
||||
joiningR: stateFAIL,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateFAIL,
|
||||
},
|
||||
}
|
||||
|
||||
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
|
||||
// already implicitly satisfied by the overall implementation.
|
||||
func (p *Profile) validateLabel(s string) (err error) {
|
||||
if s == "" {
|
||||
if p.verifyDNSLength {
|
||||
return &labelError{s, "A4"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if !p.validateLabels {
|
||||
return nil
|
||||
}
|
||||
trie := p.trie // p.validateLabels is only set if trie is set.
|
||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||
return &labelError{s, "V2"}
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return &labelError{s, "V3"}
|
||||
}
|
||||
// TODO: merge the use of this in the trie.
|
||||
v, sz := trie.lookupString(s)
|
||||
x := info(v)
|
||||
if x.isModifier() {
|
||||
return &labelError{s, "V5"}
|
||||
}
|
||||
// Quickly return in the absence of zero-width (non) joiners.
|
||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||
return nil
|
||||
}
|
||||
st := stateStart
|
||||
for i := 0; ; {
|
||||
jt := x.joinType()
|
||||
if s[i:i+sz] == zwj {
|
||||
jt = joinZWJ
|
||||
} else if s[i:i+sz] == zwnj {
|
||||
jt = joinZWNJ
|
||||
}
|
||||
st = joinStates[st][jt]
|
||||
if x.isViramaModifier() {
|
||||
st = joinStates[st][joinVirama]
|
||||
}
|
||||
if i += sz; i == len(s) {
|
||||
break
|
||||
}
|
||||
v, sz = trie.lookupString(s[i:])
|
||||
x = info(v)
|
||||
}
|
||||
if st == stateFAIL || st == stateAfter {
|
||||
return &labelError{s, "C"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ascii(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
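The profiles defined above compose as plain options. A short usage sketch (with an illustrative domain) showing the permissive package-level helpers next to a lookup profile built with New:

// Sketch only: package-level helpers vs. a custom lookup Profile.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	// ToASCII uses the permissive Punycode profile: no mapping, minimal checks.
	a, err := idna.ToASCII("bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a) // xn--bcher-kva.example.com

	// A profile built with MapForLookup adds the STD3 rules, label validation
	// and case/width mapping described above; BidiRule adds the RFC 5893 check.
	p := idna.New(idna.MapForLookup(), idna.BidiRule())
	a, err = p.ToASCII("Bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a) // xn--bcher-kva.example.com

	u, err := idna.Lookup.ToUnicode("xn--bcher-kva.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // bücher.example.com
}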
682
vendor/golang.org/x/net/idna/idna9.0.0.go
generated
vendored
Normal file
@ -0,0 +1,682 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as lookup.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||
func ToASCII(s string) (string, error) {
|
||||
return Punycode.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
return Punycode.process(s, false)
|
||||
}
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||
// compatibility. It is used by most browsers when resolving domain names. This
|
||||
// option is only meaningful if combined with MapForLookup.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = true }
|
||||
}
|
||||
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||
//
|
||||
// This is the behavior suggested by UTS #46 and is adopted by some
|
||||
// browsers.
|
||||
func RemoveLeadingDots(remove bool) Option {
|
||||
return func(o *options) { o.removeLeadingDots = remove }
|
||||
}
|
||||
|
||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||
func ValidateLabels(enable bool) Option {
|
||||
return func(o *options) {
|
||||
// Don't override existing mappings, but set one that at least checks
|
||||
// normalization if it is not set.
|
||||
if o.mapping == nil && enable {
|
||||
o.mapping = normalize
|
||||
}
|
||||
o.trie = trie
|
||||
o.validateLabels = enable
|
||||
o.fromPuny = validateFromPunycode
|
||||
}
|
||||
}
|
||||
|
||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
|
||||
//
|
||||
// This option is useful, for instance, for browsers that allow characters
|
||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||
// http://www.rfc-editor.org/std/std3.txt for more details. This option
|
||||
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
|
||||
func StrictDomainName(use bool) Option {
|
||||
return func(o *options) {
|
||||
o.trie = trie
|
||||
o.useSTD3Rules = use
|
||||
o.fromPuny = validateFromPunycode
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: the following options pull in tables. The tables should not be linked
|
||||
// in as long as the options are not used.
|
||||
|
||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||
// that relies on proper validation of labels should include this rule.
|
||||
func BidiRule() Option {
|
||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||
}
|
||||
|
||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||
func ValidateForRegistration() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateRegistration
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
VerifyDNSLength(true)(o)
|
||||
BidiRule()(o)
|
||||
}
|
||||
}
|
||||
|
||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||
// transformed for domain name lookup according to the requirements set out in
|
||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||
// to add this check.
|
||||
//
|
||||
// The mappings include normalization and mapping case, width and other
|
||||
// compatibility mappings.
|
||||
func MapForLookup() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateAndMap
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
RemoveLeadingDots(true)(o)
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
transitional bool
|
||||
useSTD3Rules bool
|
||||
validateLabels bool
|
||||
verifyDNSLength bool
|
||||
removeLeadingDots bool
|
||||
|
||||
trie *idnaTrie
|
||||
|
||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||
fromPuny func(p *Profile, s string) error
|
||||
|
||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||
mapping func(p *Profile, s string) (string, error)
|
||||
|
||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||
// defined in RFC 5893.
|
||||
bidirule func(s string) bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of an IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
//
|
||||
// With no options, the returned Profile is the most permissive and equals the
|
||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||
// for lookup and registration purposes respectively, which can be tailored by
|
||||
// adding more fine-grained options, where later options override earlier
|
||||
// options.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.useSTD3Rules {
|
||||
s += ":UseSTD3Rules"
|
||||
}
|
||||
if p.validateLabels {
|
||||
s += ":ValidateLabels"
|
||||
}
|
||||
if p.verifyDNSLength {
|
||||
s += ":VerifyDNSLength"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||
// of validation.
|
||||
Punycode *Profile = punycode
|
||||
|
||||
// Lookup is the recommended profile for looking up domain names, according
|
||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||
// change over time.
|
||||
Lookup *Profile = lookup
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display *Profile = display
|
||||
|
||||
// Registration is the recommended profile for checking whether a given
|
||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||
Registration *Profile = registration
|
||||
|
||||
punycode = &Profile{}
|
||||
lookup = &Profile{options{
|
||||
transitional: true,
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
removeLeadingDots: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
display = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
removeLeadingDots: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
registration = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
verifyDNSLength: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateRegistration,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
|
||||
// TODO: profiles
|
||||
// Register: recommended for approving domain names: don't do any mappings
|
||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||
)
|
||||
|
||||
type labelError struct{ label, code_ string }
|
||||
|
||||
func (e labelError) code() string { return e.code_ }
|
||||
func (e labelError) Error() string {
|
||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||
}
|
||||
|
||||
type runeError rune
|
||||
|
||||
func (e runeError) code() string { return "P1" }
|
||||
func (e runeError) Error() string {
|
||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||
}
|
||||
|
||||
// process implements the algorithm described in section 4 of UTS #46,
|
||||
// see https://www.unicode.org/reports/tr46.
|
||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||
var err error
|
||||
if p.mapping != nil {
|
||||
s, err = p.mapping(p, s)
|
||||
}
|
||||
// Remove leading empty labels.
|
||||
if p.removeLeadingDots {
|
||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||
}
|
||||
}
|
||||
// It seems like we should only create this error on ToASCII, but the
|
||||
// UTS 46 conformance tests suggest we should always check this.
|
||||
if err == nil && p.verifyDNSLength && s == "" {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
labels := labelIter{orig: s}
|
||||
for ; !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if label == "" {
|
||||
// Empty labels are not okay. The label iterator skips the last
|
||||
// label if it is empty.
|
||||
if err == nil && p.verifyDNSLength {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(label, acePrefix) {
|
||||
u, err2 := decode(label[len(acePrefix):])
|
||||
if err2 != nil {
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
// Spec says keep the old label.
|
||||
continue
|
||||
}
|
||||
labels.set(u)
|
||||
if err == nil && p.validateLabels {
|
||||
err = p.fromPuny(p, u)
|
||||
}
|
||||
if err == nil {
|
||||
// This should be called on NonTransitional, according to the
|
||||
// spec, but that currently does not have any effect. Use the
|
||||
// original profile to preserve options.
|
||||
err = p.validateLabel(u)
|
||||
}
|
||||
} else if err == nil {
|
||||
err = p.validateLabel(label)
|
||||
}
|
||||
}
|
||||
if toASCII {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if !ascii(label) {
|
||||
a, err2 := encode(acePrefix, label)
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
label = a
|
||||
labels.set(a)
|
||||
}
|
||||
n := len(label)
|
||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||
err = &labelError{label, "A4"}
|
||||
}
|
||||
}
|
||||
}
|
||||
s = labels.result()
|
||||
if toASCII && p.verifyDNSLength && err == nil {
|
||||
// Compute the length of the domain name minus the root label and its dot.
|
||||
n := len(s)
|
||||
if n > 0 && s[n-1] == '.' {
|
||||
n--
|
||||
}
|
||||
if len(s) < 1 || n > 253 {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
func normalize(p *Profile, s string) (string, error) {
|
||||
return norm.NFC.String(s), nil
|
||||
}
|
||||
|
||||
func validateRegistration(p *Profile, s string) (string, error) {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return s, &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
||||
// for strict conformance to IDNA2008.
|
||||
case valid, deviation:
|
||||
case disallowed, mapped, unknown, ignored:
|
||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||
return s, runeError(r)
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func validateAndMap(p *Profile, s string) (string, error) {
|
||||
var (
|
||||
err error
|
||||
b []byte
|
||||
k int
|
||||
)
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
start := i
|
||||
i += sz
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
case valid:
|
||||
continue
|
||||
case disallowed:
|
||||
if err == nil {
|
||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
||||
err = runeError(r)
|
||||
}
|
||||
continue
|
||||
case mapped, deviation:
|
||||
b = append(b, s[k:start]...)
|
||||
b = info(v).appendMapping(b, s[start:i])
|
||||
case ignored:
|
||||
b = append(b, s[k:start]...)
|
||||
// drop the rune
|
||||
case unknown:
|
||||
b = append(b, s[k:start]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
}
|
||||
k = i
|
||||
}
|
||||
if k == 0 {
|
||||
// No changes so far.
|
||||
s = norm.NFC.String(s)
|
||||
} else {
|
||||
b = append(b, s[k:]...)
|
||||
if norm.NFC.QuickSpan(b) != len(b) {
|
||||
b = norm.NFC.Bytes(b)
|
||||
}
|
||||
// TODO: the punycode converters require strings as input.
|
||||
s = string(b)
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
// A labelIter allows iterating over domain name labels.
|
||||
type labelIter struct {
|
||||
orig string
|
||||
slice []string
|
||||
curStart int
|
||||
curEnd int
|
||||
i int
|
||||
}
|
||||
|
||||
func (l *labelIter) reset() {
|
||||
l.curStart = 0
|
||||
l.curEnd = 0
|
||||
l.i = 0
|
||||
}
|
||||
|
||||
func (l *labelIter) done() bool {
|
||||
return l.curStart >= len(l.orig)
|
||||
}
|
||||
|
||||
func (l *labelIter) result() string {
|
||||
if l.slice != nil {
|
||||
return strings.Join(l.slice, ".")
|
||||
}
|
||||
return l.orig
|
||||
}
|
||||
|
||||
func (l *labelIter) label() string {
|
||||
if l.slice != nil {
|
||||
return l.slice[l.i]
|
||||
}
|
||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||
l.curEnd = l.curStart + p
|
||||
if p == -1 {
|
||||
l.curEnd = len(l.orig)
|
||||
}
|
||||
return l.orig[l.curStart:l.curEnd]
|
||||
}
|
||||
|
||||
// next sets the value to the next label. It skips the last label if it is empty.
|
||||
func (l *labelIter) next() {
|
||||
l.i++
|
||||
if l.slice != nil {
|
||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
} else {
|
||||
l.curStart = l.curEnd + 1
|
||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *labelIter) set(s string) {
|
||||
if l.slice == nil {
|
||||
l.slice = strings.Split(l.orig, ".")
|
||||
}
|
||||
l.slice[l.i] = s
|
||||
}
|
||||
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
|
||||
func (p *Profile) simplify(cat category) category {
|
||||
switch cat {
|
||||
case disallowedSTD3Mapped:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = mapped
|
||||
}
|
||||
case disallowedSTD3Valid:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = valid
|
||||
}
|
||||
case deviation:
|
||||
if !p.transitional {
|
||||
cat = valid
|
||||
}
|
||||
case validNV8, validXV8:
|
||||
// TODO: handle V2008
|
||||
cat = valid
|
||||
}
|
||||
return cat
|
||||
}
|
||||
|
||||
func validateFromPunycode(p *Profile, s string) error {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||
return &labelError{s, "V6"}
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
zwnj = "\u200c"
|
||||
zwj = "\u200d"
|
||||
)
|
||||
|
||||
type joinState int8
|
||||
|
||||
const (
|
||||
stateStart joinState = iota
|
||||
stateVirama
|
||||
stateBefore
|
||||
stateBeforeVirama
|
||||
stateAfter
|
||||
stateFAIL
|
||||
)
|
||||
|
||||
var joinStates = [][numJoinTypes]joinState{
|
||||
stateStart: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateVirama,
|
||||
},
|
||||
stateVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
},
|
||||
stateBefore: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
joinZWNJ: stateAfter,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateBeforeVirama,
|
||||
},
|
||||
stateBeforeVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
},
|
||||
stateAfter: {
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateAfter,
|
||||
joiningR: stateStart,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||
},
|
||||
stateFAIL: {
|
||||
0: stateFAIL,
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateFAIL,
|
||||
joiningT: stateFAIL,
|
||||
joiningR: stateFAIL,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateFAIL,
|
||||
},
|
||||
}
|
||||
|
||||
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
|
||||
// already implicitly satisfied by the overall implementation.
|
||||
func (p *Profile) validateLabel(s string) error {
|
||||
if s == "" {
|
||||
if p.verifyDNSLength {
|
||||
return &labelError{s, "A4"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if p.bidirule != nil && !p.bidirule(s) {
|
||||
return &labelError{s, "B"}
|
||||
}
|
||||
if !p.validateLabels {
|
||||
return nil
|
||||
}
|
||||
trie := p.trie // p.validateLabels is only set if trie is set.
|
||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||
return &labelError{s, "V2"}
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return &labelError{s, "V3"}
|
||||
}
|
||||
// TODO: merge the use of this in the trie.
|
||||
v, sz := trie.lookupString(s)
|
||||
x := info(v)
|
||||
if x.isModifier() {
|
||||
return &labelError{s, "V5"}
|
||||
}
|
||||
// Quickly return in the absence of zero-width (non) joiners.
|
||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||
return nil
|
||||
}
|
||||
st := stateStart
|
||||
for i := 0; ; {
|
||||
jt := x.joinType()
|
||||
if s[i:i+sz] == zwj {
|
||||
jt = joinZWJ
|
||||
} else if s[i:i+sz] == zwnj {
|
||||
jt = joinZWNJ
|
||||
}
|
||||
st = joinStates[st][jt]
|
||||
if x.isViramaModifier() {
|
||||
st = joinStates[st][joinVirama]
|
||||
}
|
||||
if i += sz; i == len(s) {
|
||||
break
|
||||
}
|
||||
v, sz = trie.lookupString(s[i:])
|
||||
x = info(v)
|
||||
}
|
||||
if st == stateFAIL || st == stateAfter {
|
||||
return &labelError{s, "C"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ascii(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
203
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
Normal file
@ -0,0 +1,203 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package idna
|
||||
|
||||
// This file implements the Punycode algorithm from RFC 3492.
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// These parameter values are specified in section 5.
|
||||
//
|
||||
// All computation is done with int32s, so that overflow behavior is identical
|
||||
// regardless of whether int is 32-bit or 64-bit.
|
||||
const (
|
||||
base int32 = 36
|
||||
damp int32 = 700
|
||||
initialBias int32 = 72
|
||||
initialN int32 = 128
|
||||
skew int32 = 38
|
||||
tmax int32 = 26
|
||||
tmin int32 = 1
|
||||
)
|
||||
|
||||
func punyError(s string) error { return &labelError{s, "A3"} }
|
||||
|
||||
// decode decodes a string as specified in section 6.2.
|
||||
func decode(encoded string) (string, error) {
|
||||
if encoded == "" {
|
||||
return "", nil
|
||||
}
|
||||
pos := 1 + strings.LastIndex(encoded, "-")
|
||||
if pos == 1 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
if pos == len(encoded) {
|
||||
return encoded[:len(encoded)-1], nil
|
||||
}
|
||||
output := make([]rune, 0, len(encoded))
|
||||
if pos != 0 {
|
||||
for _, r := range encoded[:pos-1] {
|
||||
output = append(output, r)
|
||||
}
|
||||
}
|
||||
i, n, bias := int32(0), initialN, initialBias
|
||||
for pos < len(encoded) {
|
||||
oldI, w := i, int32(1)
|
||||
for k := base; ; k += base {
|
||||
if pos == len(encoded) {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
digit, ok := decodeDigit(encoded[pos])
|
||||
if !ok {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
pos++
|
||||
i += digit * w
|
||||
if i < 0 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
t := k - bias
|
||||
if t < tmin {
|
||||
t = tmin
|
||||
} else if t > tmax {
|
||||
t = tmax
|
||||
}
|
||||
if digit < t {
|
||||
break
|
||||
}
|
||||
w *= base - t
|
||||
if w >= math.MaxInt32/base {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
}
|
||||
x := int32(len(output) + 1)
|
||||
bias = adapt(i-oldI, x, oldI == 0)
|
||||
n += i / x
|
||||
i %= x
|
||||
if n > utf8.MaxRune || len(output) >= 1024 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
output = append(output, 0)
|
||||
copy(output[i+1:], output[i:])
|
||||
output[i] = n
|
||||
i++
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
// encode encodes a string as specified in section 6.3 and prepends prefix to
|
||||
// the result.
|
||||
//
|
||||
// The "while h < length(input)" line in the specification becomes "for
|
||||
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
|
||||
func encode(prefix, s string) (string, error) {
|
||||
output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
|
||||
copy(output, prefix)
|
||||
delta, n, bias := int32(0), initialN, initialBias
|
||||
b, remaining := int32(0), int32(0)
|
||||
for _, r := range s {
|
||||
if r < 0x80 {
|
||||
b++
|
||||
output = append(output, byte(r))
|
||||
} else {
|
||||
remaining++
|
||||
}
|
||||
}
|
||||
h := b
|
||||
if b > 0 {
|
||||
output = append(output, '-')
|
||||
}
|
||||
for remaining != 0 {
|
||||
m := int32(0x7fffffff)
|
||||
for _, r := range s {
|
||||
if m > r && r >= n {
|
||||
m = r
|
||||
}
|
||||
}
|
||||
delta += (m - n) * (h + 1)
|
||||
if delta < 0 {
|
||||
return "", punyError(s)
|
||||
}
|
||||
n = m
|
||||
for _, r := range s {
|
||||
if r < n {
|
||||
delta++
|
||||
if delta < 0 {
|
||||
return "", punyError(s)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if r > n {
|
||||
continue
|
||||
}
|
||||
q := delta
|
||||
for k := base; ; k += base {
|
||||
t := k - bias
|
||||
if t < tmin {
|
||||
t = tmin
|
||||
} else if t > tmax {
|
||||
t = tmax
|
||||
}
|
||||
if q < t {
|
||||
break
|
||||
}
|
||||
output = append(output, encodeDigit(t+(q-t)%(base-t)))
|
||||
q = (q - t) / (base - t)
|
||||
}
|
||||
output = append(output, encodeDigit(q))
|
||||
bias = adapt(delta, h+1, h == b)
|
||||
delta = 0
|
||||
h++
|
||||
remaining--
|
||||
}
|
||||
delta++
|
||||
n++
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
func decodeDigit(x byte) (digit int32, ok bool) {
|
||||
switch {
|
||||
case '0' <= x && x <= '9':
|
||||
return int32(x - ('0' - 26)), true
|
||||
case 'A' <= x && x <= 'Z':
|
||||
return int32(x - 'A'), true
|
||||
case 'a' <= x && x <= 'z':
|
||||
return int32(x - 'a'), true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func encodeDigit(digit int32) byte {
|
||||
switch {
|
||||
case 0 <= digit && digit < 26:
|
||||
return byte(digit + 'a')
|
||||
case 26 <= digit && digit < 36:
|
||||
return byte(digit + ('0' - 26))
|
||||
}
|
||||
panic("idna: internal error in punycode encoding")
|
||||
}
|
||||
|
||||
// adapt is the bias adaptation function specified in section 6.1.
|
||||
func adapt(delta, numPoints int32, firstTime bool) int32 {
|
||||
if firstTime {
|
||||
delta /= damp
|
||||
} else {
|
||||
delta /= 2
|
||||
}
|
||||
delta += delta / numPoints
|
||||
k := int32(0)
|
||||
for delta > ((base-tmin)*tmax)/2 {
|
||||
delta /= base - tmin
|
||||
k += base
|
||||
}
|
||||
return k + (base-tmin+1)*delta/(delta+skew)
|
||||
}
|
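The digit alphabet used by the encoder above is 'a'-'z' for values 0-25 and '0'-'9' for 26-35 (RFC 3492 section 5). A standalone sketch that mirrors encodeDigit and decodeDigit from this file and round-trips every digit, included only to make the mapping concrete:

// Sketch only: the punycode digit alphabet and its inverse.
package main

import "fmt"

// encodeDigit maps 0..25 to 'a'..'z' and 26..35 to '0'..'9', as in punycode.go.
func encodeDigit(digit int32) byte {
	switch {
	case 0 <= digit && digit < 26:
		return byte(digit + 'a')
	case 26 <= digit && digit < 36:
		return byte(digit + ('0' - 26))
	}
	panic("digit out of range")
}

// decodeDigit is the inverse mapping; it also accepts uppercase letters.
func decodeDigit(x byte) (int32, bool) {
	switch {
	case '0' <= x && x <= '9':
		return int32(x - ('0' - 26)), true
	case 'A' <= x && x <= 'Z':
		return int32(x - 'A'), true
	case 'a' <= x && x <= 'z':
		return int32(x - 'a'), true
	}
	return 0, false
}

func main() {
	for d := int32(0); d < 36; d++ {
		c := encodeDigit(d)
		back, ok := decodeDigit(c)
		fmt.Printf("%2d -> %c -> %2d (ok=%v)\n", d, c, back, ok)
	}
}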
4559
vendor/golang.org/x/net/idna/tables10.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4653
vendor/golang.org/x/net/idna/tables11.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4733
vendor/golang.org/x/net/idna/tables12.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4839
vendor/golang.org/x/net/idna/tables13.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4486
vendor/golang.org/x/net/idna/tables9.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
72
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package idna
|
||||
|
||||
// appendMapping appends the mapping for the respective rune. isMapped must be
|
||||
// true. A mapping is a categorization of a rune as defined in UTS #46.
|
||||
func (c info) appendMapping(b []byte, s string) []byte {
|
||||
index := int(c >> indexShift)
|
||||
if c&xorBit == 0 {
|
||||
s := mappings[index:]
|
||||
return append(b, s[1:s[0]+1]...)
|
||||
}
|
||||
b = append(b, s...)
|
||||
if c&inlineXOR == inlineXOR {
|
||||
// TODO: support and handle two-byte inline masks
|
||||
b[len(b)-1] ^= byte(index)
|
||||
} else {
|
||||
for p := len(b) - int(xorData[index]); p < len(b); p++ {
|
||||
index++
|
||||
b[p] ^= xorData[index]
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Sparse block handling code.
|
||||
|
||||
type valueRange struct {
|
||||
value uint16 // header: value:stride
|
||||
lo, hi byte // header: lo:n
|
||||
}
|
||||
|
||||
type sparseBlocks struct {
|
||||
values []valueRange
|
||||
offset []uint16
|
||||
}
|
||||
|
||||
var idnaSparse = sparseBlocks{
|
||||
values: idnaSparseValues[:],
|
||||
offset: idnaSparseOffset[:],
|
||||
}
|
||||
|
||||
// Don't use newIdnaTrie to avoid unconditional linking in of the table.
|
||||
var trie = &idnaTrie{}
|
||||
|
||||
// lookup determines the type of block n and looks up the value for b.
|
||||
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
|
||||
// is a list of ranges with an accompanying value. Given a matching range r,
|
||||
// the value for b is by r.value + (b - r.lo) * stride.
|
||||
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
|
||||
offset := t.offset[n]
|
||||
header := t.values[offset]
|
||||
lo := offset + 1
|
||||
hi := lo + uint16(header.lo)
|
||||
for lo < hi {
|
||||
m := lo + (hi-lo)/2
|
||||
r := t.values[m]
|
||||
if r.lo <= b && b <= r.hi {
|
||||
return r.value + uint16(b-r.lo)*header.value
|
||||
}
|
||||
if b < r.lo {
|
||||
hi = m
|
||||
} else {
|
||||
lo = m + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
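To make the range lookup above concrete, here is a small self-contained Go sketch of the same technique; the block contents, the stride and the sample bytes are invented for illustration and are not the generated idna tables.

package main

import "fmt"

// rangeEntry mirrors valueRange: the first entry of a block is a header whose
// value field holds the stride and whose lo field holds the number of ranges.
type rangeEntry struct {
	value  uint16
	lo, hi byte
}

// lookupBlock performs the same bounded binary search as sparseBlocks.lookup,
// but over one hand-written block instead of the generated idna tables.
func lookupBlock(block []rangeEntry, b byte) uint16 {
	header := block[0]
	lo, hi := 1, 1+int(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := block[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0 // no range covers b
}

func main() {
	block := []rangeEntry{
		{value: 2, lo: 2},                // header: stride=2, two ranges follow
		{value: 100, lo: 0x10, hi: 0x1f},
		{value: 500, lo: 0x40, hi: 0x4f},
	}
	fmt.Println(lookupBlock(block, 0x12)) // 100 + (0x12-0x10)*2 = 104
	fmt.Println(lookupBlock(block, 0x50)) // 0: outside every range
}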
119
vendor/golang.org/x/net/idna/trieval.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package idna

// This file contains definitions for interpreting the trie value of the idna
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.

// info holds information from the IDNA mapping table for a single rune. It is
// the value returned by a trie lookup. In most cases, all information fits in
// a 16-bit value. For mappings, this value may contain an index into a slice
// with the mapped string. Such mappings can consist of the actual mapped value
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
// input rune. This technique is used by the cases packages and reduces the
// table size significantly.
//
// The per-rune values have the following format:
//
//   if mapped {
//     if inlinedXOR {
//       15..13 inline XOR marker
//       12..11 unused
//       10..3  inline XOR mask
//     } else {
//       15..3  index into xor or mapping table
//     }
//   } else {
//       15..14 unused
//       13     mayNeedNorm
//       12..11 attributes
//       10..8  joining type
//        7..3  category type
//   }
//      2  use xor pattern
//   1..0  mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16

const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	indexShift   = 3
	xorBit       = 0x4    // interpret the index as an xor pattern
	inlineXOR    = 0xE000 // These bits are set if the XOR pattern is inlined.

	joinShift = 8
	joinMask  = 0x07

	// Attributes
	attributesMask = 0x1800
	viramaModifier = 0x1800
	modifier       = 0x1000
	rtl            = 0x0800

	mayNeedNorm = 0x2000
)

// A category corresponds to a category defined in the IDNA mapping table.
type category uint16

const (
	unknown              category = 0 // not currently defined in unicode.
	mapped               category = 1
	disallowedSTD3Mapped category = 2
	deviation            category = 3
)

const (
	valid               category = 0x08
	validNV8            category = 0x18
	validXV8            category = 0x28
	disallowed          category = 0x40
	disallowedSTD3Valid category = 0x80
	ignored             category = 0xC0
)

// join types and additional rune information
const (
	joiningL = (iota + 1)
	joiningD
	joiningT
	joiningR

	// the following types are derived during processing
	joinZWJ
	joinZWNJ
	joinVirama
	numJoinTypes
)

func (c info) isMapped() bool {
	return c&0x3 != 0
}

func (c info) category() category {
	small := c & catSmallMask
	if small != 0 {
		return category(small)
	}
	return category(c & catBigMask)
}

func (c info) joinType() info {
	if c.isMapped() {
		return 0
	}
	return (c >> joinShift) & joinMask
}

func (c info) isModifier() bool {
	return c&(modifier|catSmallMask) == modifier
}

func (c info) isViramaModifier() bool {
	return c&(attributesMask|catSmallMask) == viramaModifier
}
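As a quick illustration of the bit layout documented above, the following standalone sketch decodes a hand-made, non-mapped trie value with the same masks and shifts; the sample value 0x2308 is an assumption chosen for demonstration and is not taken from the generated tables.

package main

import "fmt"

const (
	catSmallMask   = 0x3
	catBigMask     = 0xF8
	joinShift      = 8
	joinMask       = 0x07
	attributesMask = 0x1800
	mayNeedNorm    = 0x2000
)

func main() {
	// Hypothetical non-mapped value: category "valid" (0x08), joining type 3,
	// mayNeedNorm set => 0x08 | 3<<8 | 0x2000 = 0x2308.
	v := uint16(0x2308)

	fmt.Println("mapped:", v&catSmallMask != 0)            // false
	fmt.Printf("category bits: %#x\n", v&catBigMask)       // 0x8
	fmt.Println("joining type:", (v>>joinShift)&joinMask)  // 3
	fmt.Println("may need norm:", v&mayNeedNorm != 0)      // true
	fmt.Printf("attributes: %#x\n", v&attributesMask)      // 0x0
}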
223
vendor/golang.org/x/net/internal/iana/const.go
generated
vendored
Normal file
@ -0,0 +1,223 @@
// go generate gen.go
// Code generated by the command above; DO NOT EDIT.

// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
package iana // import "golang.org/x/net/internal/iana"

// Differentiated Services Field Codepoints (DSCP), Updated: 2018-05-04
const (
	DiffServCS0 = 0x00 // CS0
	DiffServCS1 = 0x20 // CS1
	DiffServCS2 = 0x40 // CS2
	DiffServCS3 = 0x60 // CS3
	DiffServCS4 = 0x80 // CS4
	DiffServCS5 = 0xa0 // CS5
	DiffServCS6 = 0xc0 // CS6
	DiffServCS7 = 0xe0 // CS7
	DiffServAF11 = 0x28 // AF11
	DiffServAF12 = 0x30 // AF12
	DiffServAF13 = 0x38 // AF13
	DiffServAF21 = 0x48 // AF21
	DiffServAF22 = 0x50 // AF22
	DiffServAF23 = 0x58 // AF23
	DiffServAF31 = 0x68 // AF31
	DiffServAF32 = 0x70 // AF32
	DiffServAF33 = 0x78 // AF33
	DiffServAF41 = 0x88 // AF41
	DiffServAF42 = 0x90 // AF42
	DiffServAF43 = 0x98 // AF43
	DiffServEF = 0xb8 // EF
	DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT
	NotECNTransport = 0x00 // Not-ECT (Not ECN-Capable Transport)
	ECNTransport1 = 0x01 // ECT(1) (ECN-Capable Transport(1))
	ECNTransport0 = 0x02 // ECT(0) (ECN-Capable Transport(0))
	CongestionExperienced = 0x03 // CE (Congestion Experienced)
)

// Protocol Numbers, Updated: 2017-10-13
const (
	ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number
	ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option
	ProtocolICMP = 1 // Internet Control Message
	ProtocolIGMP = 2 // Internet Group Management
	ProtocolGGP = 3 // Gateway-to-Gateway
	ProtocolIPv4 = 4 // IPv4 encapsulation
	ProtocolST = 5 // Stream
	ProtocolTCP = 6 // Transmission Control
	ProtocolCBT = 7 // CBT
	ProtocolEGP = 8 // Exterior Gateway Protocol
	ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
	ProtocolBBNRCCMON = 10 // BBN RCC Monitoring
	ProtocolNVPII = 11 // Network Voice Protocol
	ProtocolPUP = 12 // PUP
	ProtocolEMCON = 14 // EMCON
	ProtocolXNET = 15 // Cross Net Debugger
	ProtocolCHAOS = 16 // Chaos
	ProtocolUDP = 17 // User Datagram
	ProtocolMUX = 18 // Multiplexing
	ProtocolDCNMEAS = 19 // DCN Measurement Subsystems
	ProtocolHMP = 20 // Host Monitoring
	ProtocolPRM = 21 // Packet Radio Measurement
	ProtocolXNSIDP = 22 // XEROX NS IDP
	ProtocolTRUNK1 = 23 // Trunk-1
	ProtocolTRUNK2 = 24 // Trunk-2
	ProtocolLEAF1 = 25 // Leaf-1
	ProtocolLEAF2 = 26 // Leaf-2
	ProtocolRDP = 27 // Reliable Data Protocol
	ProtocolIRTP = 28 // Internet Reliable Transaction
	ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4
	ProtocolNETBLT = 30 // Bulk Data Transfer Protocol
	ProtocolMFENSP = 31 // MFE Network Services Protocol
	ProtocolMERITINP = 32 // MERIT Internodal Protocol
	ProtocolDCCP = 33 // Datagram Congestion Control Protocol
	Protocol3PC = 34 // Third Party Connect Protocol
	ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol
	ProtocolXTP = 36 // XTP
	ProtocolDDP = 37 // Datagram Delivery Protocol
	ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto
	ProtocolTPPP = 39 // TP++ Transport Protocol
	ProtocolIL = 40 // IL Transport Protocol
	ProtocolIPv6 = 41 // IPv6 encapsulation
	ProtocolSDRP = 42 // Source Demand Routing Protocol
	ProtocolIPv6Route = 43 // Routing Header for IPv6
	ProtocolIPv6Frag = 44 // Fragment Header for IPv6
	ProtocolIDRP = 45 // Inter-Domain Routing Protocol
	ProtocolRSVP = 46 // Reservation Protocol
	ProtocolGRE = 47 // Generic Routing Encapsulation
	ProtocolDSR = 48 // Dynamic Source Routing Protocol
	ProtocolBNA = 49 // BNA
	ProtocolESP = 50 // Encap Security Payload
	ProtocolAH = 51 // Authentication Header
	ProtocolINLSP = 52 // Integrated Net Layer Security TUBA
	ProtocolNARP = 54 // NBMA Address Resolution Protocol
	ProtocolMOBILE = 55 // IP Mobility
	ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management
	ProtocolSKIP = 57 // SKIP
	ProtocolIPv6ICMP = 58 // ICMP for IPv6
	ProtocolIPv6NoNxt = 59 // No Next Header for IPv6
	ProtocolIPv6Opts = 60 // Destination Options for IPv6
	ProtocolCFTP = 62 // CFTP
	ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK
	ProtocolKRYPTOLAN = 65 // Kryptolan
	ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol
	ProtocolIPPC = 67 // Internet Pluribus Packet Core
	ProtocolSATMON = 69 // SATNET Monitoring
	ProtocolVISA = 70 // VISA Protocol
	ProtocolIPCV = 71 // Internet Packet Core Utility
	ProtocolCPNX = 72 // Computer Protocol Network Executive
	ProtocolCPHB = 73 // Computer Protocol Heart Beat
	ProtocolWSN = 74 // Wang Span Network
	ProtocolPVP = 75 // Packet Video Protocol
	ProtocolBRSATMON = 76 // Backroom SATNET Monitoring
	ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary
	ProtocolWBMON = 78 // WIDEBAND Monitoring
	ProtocolWBEXPAK = 79 // WIDEBAND EXPAK
	ProtocolISOIP = 80 // ISO Internet Protocol
	ProtocolVMTP = 81 // VMTP
	ProtocolSECUREVMTP = 82 // SECURE-VMTP
	ProtocolVINES = 83 // VINES
	ProtocolTTP = 84 // Transaction Transport Protocol
	ProtocolIPTM = 84 // Internet Protocol Traffic Manager
	ProtocolNSFNETIGP = 85 // NSFNET-IGP
	ProtocolDGP = 86 // Dissimilar Gateway Protocol
	ProtocolTCF = 87 // TCF
	ProtocolEIGRP = 88 // EIGRP
	ProtocolOSPFIGP = 89 // OSPFIGP
	ProtocolSpriteRPC = 90 // Sprite RPC Protocol
	ProtocolLARP = 91 // Locus Address Resolution Protocol
	ProtocolMTP = 92 // Multicast Transport Protocol
	ProtocolAX25 = 93 // AX.25 Frames
	ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol
	ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro.
	ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation
	ProtocolENCAP = 98 // Encapsulation Header
	ProtocolGMTP = 100 // GMTP
	ProtocolIFMP = 101 // Ipsilon Flow Management Protocol
	ProtocolPNNI = 102 // PNNI over IP
	ProtocolPIM = 103 // Protocol Independent Multicast
	ProtocolARIS = 104 // ARIS
	ProtocolSCPS = 105 // SCPS
	ProtocolQNX = 106 // QNX
	ProtocolAN = 107 // Active Networks
	ProtocolIPComp = 108 // IP Payload Compression Protocol
	ProtocolSNP = 109 // Sitara Networks Protocol
	ProtocolCompaqPeer = 110 // Compaq Peer Protocol
	ProtocolIPXinIP = 111 // IPX in IP
	ProtocolVRRP = 112 // Virtual Router Redundancy Protocol
	ProtocolPGM = 113 // PGM Reliable Transport Protocol
	ProtocolL2TP = 115 // Layer Two Tunneling Protocol
	ProtocolDDX = 116 // D-II Data Exchange (DDX)
	ProtocolIATP = 117 // Interactive Agent Transfer Protocol
	ProtocolSTP = 118 // Schedule Transfer Protocol
	ProtocolSRP = 119 // SpectraLink Radio Protocol
	ProtocolUTI = 120 // UTI
	ProtocolSMP = 121 // Simple Message Protocol
	ProtocolPTP = 123 // Performance Transparency Protocol
	ProtocolISIS = 124 // ISIS over IPv4
	ProtocolFIRE = 125 // FIRE
	ProtocolCRTP = 126 // Combat Radio Transport Protocol
	ProtocolCRUDP = 127 // Combat Radio User Datagram
	ProtocolSSCOPMCE = 128 // SSCOPMCE
	ProtocolIPLT = 129 // IPLT
	ProtocolSPS = 130 // Secure Packet Shield
	ProtocolPIPE = 131 // Private IP Encapsulation within IP
	ProtocolSCTP = 132 // Stream Control Transmission Protocol
	ProtocolFC = 133 // Fibre Channel
	ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE
	ProtocolMobilityHeader = 135 // Mobility Header
	ProtocolUDPLite = 136 // UDPLite
	ProtocolMPLSinIP = 137 // MPLS-in-IP
	ProtocolMANET = 138 // MANET Protocols
	ProtocolHIP = 139 // Host Identity Protocol
	ProtocolShim6 = 140 // Shim6 Protocol
	ProtocolWESP = 141 // Wrapped Encapsulating Security Payload
	ProtocolROHC = 142 // Robust Header Compression
	ProtocolReserved = 255 // Reserved
)

// Address Family Numbers, Updated: 2018-04-02
const (
	AddrFamilyIPv4 = 1 // IP (IP version 4)
	AddrFamilyIPv6 = 2 // IP6 (IP version 6)
	AddrFamilyNSAP = 3 // NSAP
	AddrFamilyHDLC = 4 // HDLC (8-bit multidrop)
	AddrFamilyBBN1822 = 5 // BBN 1822
	AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format")
	AddrFamilyE163 = 7 // E.163
	AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM)
	AddrFamilyF69 = 9 // F.69 (Telex)
	AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay)
	AddrFamilyIPX = 11 // IPX
	AddrFamilyAppletalk = 12 // Appletalk
	AddrFamilyDecnetIV = 13 // Decnet IV
	AddrFamilyBanyanVines = 14 // Banyan Vines
	AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress
	AddrFamilyDNS = 16 // DNS (Domain Name System)
	AddrFamilyDistinguishedName = 17 // Distinguished Name
	AddrFamilyASNumber = 18 // AS Number
	AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4
	AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6
	AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP
	AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name
	AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name
	AddrFamilyGWID = 24 // GWID
	AddrFamilyL2VPN = 25 // AFI for L2VPN information
	AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier
	AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP Endpoint Identifier
	AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier
	AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4
	AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6
	AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family
	AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family
	AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family
	AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF)
	AddrFamilyBGPLS = 16388 // BGP-LS
	AddrFamily48bitMAC = 16389 // 48-bit MAC
	AddrFamily64bitMAC = 16390 // 64-bit MAC
	AddrFamilyOUI = 16391 // OUI
	AddrFamilyMACFinal24bits = 16392 // MAC/24
	AddrFamilyMACFinal40bits = 16393 // MAC/40
	AddrFamilyIPv6Initial64bits = 16394 // IPv6/64
	AddrFamilyRBridgePortID = 16395 // RBridge Port ID
	AddrFamilyTRILLNickname = 16396 // TRILL Nickname
)
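The DiffServ constants above already carry the 6-bit codepoint shifted into the upper bits of the traffic-class byte, so a TOS / Traffic Class value is simply the DSCP value OR-ed with an ECN codepoint. A minimal sketch of that composition follows; the local constants mirror DiffServAF41, CongestionExperienced and ProtocolUDP only because golang.org/x/net/internal/iana is an internal package and cannot be imported directly.

package main

import "fmt"

// Values mirror DiffServAF41, CongestionExperienced and ProtocolUDP above;
// they are repeated here only to keep the sketch self-contained.
const (
	diffServAF41          = 0x88
	congestionExperienced = 0x03
	protocolUDP           = 17
)

func main() {
	// The DSCP constants are stored already shifted into the upper six bits,
	// so a traffic-class byte is simply DSCP | ECN.
	tos := diffServAF41 | congestionExperienced
	fmt.Printf("traffic class byte: %#02x (DSCP %d, ECN %d)\n", tos, tos>>2, tos&0x3)
	fmt.Println("IPv4 protocol field for UDP:", protocolUDP)
}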
11
vendor/golang.org/x/net/internal/socket/cmsghdr.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos

package socket

func (h *cmsghdr) len() int { return int(h.Len) }
func (h *cmsghdr) lvl() int { return int(h.Level) }
func (h *cmsghdr) typ() int { return int(h.Type) }
13
vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin dragonfly freebsd netbsd openbsd

package socket

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = uint32(l)
	h.Level = int32(lvl)
	h.Type = int32(typ)
}
14
vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build arm mips mipsle 386
// +build linux

package socket

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = uint32(l)
	h.Level = int32(lvl)
	h.Type = int32(typ)
}
14
vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x
// +build linux

package socket

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = uint64(l)
	h.Level = int32(lvl)
	h.Type = int32(typ)
}
14
vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64
// +build solaris

package socket

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = uint32(l)
	h.Level = int32(lvl)
	h.Type = int32(typ)
}
27
vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos

package socket

func controlHeaderLen() int {
	return 0
}

func controlMessageLen(dataLen int) int {
	return 0
}

func controlMessageSpace(dataLen int) int {
	return 0
}

type cmsghdr struct{}

func (h *cmsghdr) len() int { return 0 }
func (h *cmsghdr) lvl() int { return 0 }
func (h *cmsghdr) typ() int { return 0 }

func (h *cmsghdr) set(l, lvl, typ int) {}
21
vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris

package socket

import "golang.org/x/sys/unix"

func controlHeaderLen() int {
	return unix.CmsgLen(0)
}

func controlMessageLen(dataLen int) int {
	return unix.CmsgLen(dataLen)
}

func controlMessageSpace(dataLen int) int {
	return unix.CmsgSpace(dataLen)
}
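The controlMessage helpers above only wrap the kernel's cmsg length and alignment rules via golang.org/x/sys/unix. A minimal sketch of how such a calculation is typically used to size an out-of-band buffer (Unix-only; the 4-byte ancillary payload is an assumed example, not part of the vendored code):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Space for one ancillary item carrying a 4-byte payload (for example a
	// TTL or TOS value), including the cmsghdr itself and alignment padding.
	oob := make([]byte, unix.CmsgSpace(4))
	fmt.Println("out-of-band buffer size:", len(oob))
}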
25
vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package socket

import "syscall"

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = int32(l)
	h.Level = int32(lvl)
	h.Type = int32(typ)
}

func controlHeaderLen() int {
	return syscall.CmsgLen(0)
}

func controlMessageLen(dataLen int) int {
	return syscall.CmsgLen(dataLen)
}

func controlMessageSpace(dataLen int) int {
	return syscall.CmsgSpace(dataLen)
}
7
vendor/golang.org/x/net/internal/socket/empty.s
generated
vendored
Normal file
@ -0,0 +1,7 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin,go1.12

// This exists solely so we can linkname in symbols from syscall.
31
vendor/golang.org/x/net/internal/socket/error_unix.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos

package socket

import "syscall"

var (
	errEAGAIN error = syscall.EAGAIN
	errEINVAL error = syscall.EINVAL
	errENOENT error = syscall.ENOENT
)

// errnoErr returns common boxed Errno values, to prevent allocations
// at runtime.
func errnoErr(errno syscall.Errno) error {
	switch errno {
	case 0:
		return nil
	case syscall.EAGAIN:
		return errEAGAIN
	case syscall.EINVAL:
		return errEINVAL
	case syscall.ENOENT:
		return errENOENT
	}
	return errno
}
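The boxed-errno pattern used by errnoErr is not specific to this package. A minimal sketch of the same idea, with a hypothetical errnoToErr helper and an arbitrary pair of errno values, shows why the common cases are converted to error once instead of on every return:

package main

import (
	"fmt"
	"syscall"
)

// Boxing the common Errno values once means hot paths can return an existing
// error interface value instead of converting (and possibly allocating) each time.
var (
	errAgain error = syscall.EAGAIN
	errInval error = syscall.EINVAL
)

func errnoToErr(e syscall.Errno) error {
	switch e {
	case 0:
		return nil
	case syscall.EAGAIN:
		return errAgain
	case syscall.EINVAL:
		return errInval
	}
	return e // uncommon values fall back to a fresh conversion
}

func main() {
	fmt.Println(errnoToErr(syscall.EAGAIN)) // prints the platform's EAGAIN message
	fmt.Println(errnoToErr(0))              // <nil>
}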
26
vendor/golang.org/x/net/internal/socket/error_windows.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package socket

import "syscall"

var (
	errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING
	errEINVAL           error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent allocations
// at runtime.
func errnoErr(errno syscall.Errno) error {
	switch errno {
	case 0:
		return nil
	case syscall.ERROR_IO_PENDING:
		return errERROR_IO_PENDING
	case syscall.EINVAL:
		return errEINVAL
	}
	return errno
}
19
vendor/golang.org/x/net/internal/socket/iovec_32bit.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build arm mips mipsle 386
// +build darwin dragonfly freebsd linux netbsd openbsd

package socket

import "unsafe"

func (v *iovec) set(b []byte) {
	l := len(b)
	if l == 0 {
		return
	}
	v.Base = (*byte)(unsafe.Pointer(&b[0]))
	v.Len = uint32(l)
}
19
vendor/golang.org/x/net/internal/socket/iovec_64bit.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x
// +build aix darwin dragonfly freebsd linux netbsd openbsd zos

package socket

import "unsafe"

func (v *iovec) set(b []byte) {
	l := len(b)
	if l == 0 {
		return
	}
	v.Base = (*byte)(unsafe.Pointer(&b[0]))
	v.Len = uint64(l)
}
19
vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64
// +build solaris

package socket

import "unsafe"

func (v *iovec) set(b []byte) {
	l := len(b)
	if l == 0 {
		return
	}
	v.Base = (*int8)(unsafe.Pointer(&b[0]))
	v.Len = uint64(l)
}
11
vendor/golang.org/x/net/internal/socket/iovec_stub.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos

package socket

type iovec struct{}

func (v *iovec) set(b []byte) {}
21
vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !aix,!linux,!netbsd

package socket

import "net"

type mmsghdr struct{}

type mmsghdrs []mmsghdr

func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error {
	return nil
}

func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error {
	return nil
}
42
vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix linux netbsd

package socket

import "net"

type mmsghdrs []mmsghdr

func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error {
	for i := range hs {
		vs := make([]iovec, len(ms[i].Buffers))
		var sa []byte
		if parseFn != nil {
			sa = make([]byte, sizeofSockaddrInet6)
		}
		if marshalFn != nil {
			sa = marshalFn(ms[i].Addr)
		}
		hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa)
	}
	return nil
}

func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error {
	for i := range hs {
		ms[i].N = int(hs[i].Len)
		ms[i].NN = hs[i].Hdr.controllen()
		ms[i].Flags = hs[i].Hdr.flags()
		if parseFn != nil {
			var err error
			ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
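These pack and unpack helpers back the batch read and write paths on the platforms named in the build tag. A rough usage sketch through the public golang.org/x/net/ipv4 API follows; the loopback address, payload and buffer sizes are illustrative assumptions, not part of the vendored code.

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	// Local loopback socket; the address and payload are only examples.
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Send ourselves one datagram so the batch read below has something to return.
	if _, err := c.WriteTo([]byte("hello"), c.LocalAddr()); err != nil {
		log.Fatal(err)
	}

	p := ipv4.NewPacketConn(c)

	// One Message per slot; on the platforms listed above a single ReadBatch
	// call is served by recvmmsg through the mmsghdrs helpers shown here.
	ms := make([]ipv4.Message, 4)
	for i := range ms {
		ms[i].Buffers = [][]byte{make([]byte, 1500)}
	}

	n, err := p.ReadBatch(ms, 0)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range ms[:n] {
		fmt.Printf("%d bytes from %v\n", m.N, m.Addr)
	}
}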
Some files were not shown because too many files have changed in this diff